// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze()) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments can be invalidated soon afterwards
		 * by user update or deletion, so it is worth waiting a
		 * while to collect more dirty segments.
		 */
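		/*
		 * For example, with the default tunables from gc.h (urgent
		 * 500ms, min 30s, max 60s, no-GC 5min), a busy or frozen
		 * filesystem backs the interval off toward max_sleep_time,
		 * plenty of invalid blocks pulls it back toward
		 * min_sleep_time, and failing to find a victim at all parks
		 * the thread for no_gc_sleep_time.
		 */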
		if (sbi->gc_mode == GC_URGENT) {
			wait_ms = gc_th->urgent_sleep_time;
			mutex_lock(&sbi->gc_mutex);
			goto do_gc;
		}

		if (!mutex_trylock(&sbi->gc_mutex)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT:
		gc_mode = GC_GREEDY;
		break;
	}
	return gc_mode;
}
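/*
 * Note: background GC defaults to cost-benefit victim selection and
 * foreground/SSR GC to greedy; the gc_idle and gc_urgent sysfs knobs
 * override that default through sbi->gc_mode.
 */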
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}
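/*
 * Example: with 512 blocks per segment and one segment per section,
 * greedy GC starts from min_cost = 2 * 512 = 1024, so any candidate
 * (at most 512 valid blocks) beats the initial bound; cost-benefit GC
 * starts from UINT_MAX for the same reason.
 */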
static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can pick the victim sections that
	 * background GC selected before; those are guaranteed to have
	 * few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the user has changed the system time */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
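/*
 * Worked example for the cost-benefit value above: a section that is
 * 20% utilized (u = 20) at maximum relative age (age = 100) scores
 * 100 * 80 * 100 / 120 = 6666, i.e. cost = UINT_MAX - 6666, while an
 * 80% utilized section of the same age scores 100 * 20 * 100 / 180 =
 * 1111, a higher (worse) cost. Lower cost wins, so old, mostly-invalid
 * sections are reclaimed first.
 */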
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (*result != NULL_SEGNO) {
		if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
			get_valid_blocks(sbi, *result, false) &&
			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			p.min_segno = *result;
		goto out;
	}

	if (p.max_search == 0)
		goto out;

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
					get_ckpt_valid_blocks(sbi, segno)))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] = last_victim + 1;
			else
				sm->last_victim[p.gc_mode] = segno + 1;
			sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};
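/*
 * default_v_ops is installed by f2fs_build_gc_manager() and reached
 * through DIRTY_I(sbi)->v_ops: __get_victim() below calls it with
 * alloc_mode == LFS for GC, while the SSR allocator in segment.c uses
 * the same hook with alloc_mode == SSR. A return value of 1 means a
 * victim was stored in *result; 0 means nothing suitable was found.
 */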
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address in the summary with that in
 * the NAT. If the address is valid, the node is migrated with cold
 * status; otherwise the (invalid) node is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		f2fs_move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
}

/*
 * Calculate the start block index that the given node offset maps to.
 * Be careful: callers must pass node offsets of direct node blocks
 * only. Passing the offset of an indirect or double indirect node
 * block is a caller's bug.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}
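/*
 * For instance, with the usual 4KB block layout (NIDS_PER_BLOCK == 1018,
 * ADDRS_PER_BLOCK == 1018 and ADDRS_PER_INODE == 923 when no extra
 * attribute space is reserved): node_ofs 1, the first direct node,
 * gives bidx 0, i.e. file block 923; node_ofs 4, the first direct
 * child of the first indirect node, gives bidx 2, i.e. block
 * 923 + 2 * 1018.
 */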
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: valid data with mismatched node version.",
				__func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC))) {
		err = -EFAULT;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);
	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}
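/*
 * ra_data_block() is the readahead half of the meta-mapping move: for
 * inodes that need post-read processing (e.g. encryption), phase 3 of
 * gc_data_segment() uses it to pull the on-disk block into
 * META_MAPPING, so the later move_data_block() call usually finds the
 * page already up to date.
 */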
/*
 * Move a data block via META_MAPPING while keeping the data page locked.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static void move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err;
	bool lfs_mode = test_opt(fio.sbi, LFS);

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		goto out;
	}

	if (f2fs_is_pinned_file(inode)) {
		f2fs_pin_file_control(inode, true);
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * Don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto put_out;

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		down_write(&fio.sbi->io_order_lock);

	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, CURSEG_COLD_DATA, NULL, false);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
	if (mpage) {
		bool updated = false;

		if (PageUptodate(mpage)) {
			memcpy(page_address(fio.encrypted_page),
					page_address(mpage), PAGE_SIZE);
			updated = true;
		}
		f2fs_put_page(mpage, 1);
		invalidate_mapping_pages(META_MAPPING(fio.sbi),
					fio.old_blkaddr, fio.old_blkaddr);
		if (updated)
			goto write_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

write_page:
	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		err = -EAGAIN;
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (lfs_mode)
		up_write(&fio.sbi->io_order_lock);
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}
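/*
 * Note the unwind order above: once f2fs_allocate_data_block() has
 * switched the SIT/summary state over to newaddr, any later failure
 * goes through recover_block, where f2fs_do_replace_block() points the
 * block back at old_blkaddr to undo the allocation.
 */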
static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		goto out;
	}
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_cold_data(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
}
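/*
 * Of the two movers, move_data_block() copies the raw block through
 * META_MAPPING (required when the contents cannot be handled through
 * the regular page cache), while move_data_page() rewrites the plain
 * data page through the normal write path; for background GC it only
 * dirties the page and lets writeback migrate it.
 */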
/*
 * This function tries to get the parent node of a victim data block, and
 * checks that the data block is still valid. If it is, the block is copied
 * with cold status and the parent node is updated.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
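/*
 * The walk below makes five passes over the victim segment's summaries:
 * phase 0 reads ahead the NAT blocks, phase 1 the dnode pages, and
 * phase 2 the owning inodes' node pages; phase 3 grabs each inode and
 * readies its data page (or readahead block), and phase 4 actually
 * moves the blocks.
 */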
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			if (!down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode,
						start_bidx, REQ_RAHEAD, true);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				move_data_block(inode, start_bidx, gc_type,
								segno, off);
			else
				move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (locked) {
				up_write(&fi->i_gc_rwsem[WRITE]);
				up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
						NO_CHECK_TYPE, LFS);
	up_write(&sit_i->sentry_lock);
	return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* readahead multiple SSA blocks, which have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
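				/*
				 * Drop both the reference just taken by
				 * find_get_page() and the one left held
				 * by f2fs_get_sum_page() above.
				 */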
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0 ||
				!PageUptodate(sum_page) ||
				unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
				"type [%d, %d] in SSA and SIT",
				segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			goto next;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);

		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}

int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
	unsigned long long first_skipped;
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
	sbi->skipped_gc_rwsem = 0;
	first_skipped = last_skipped;
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree_segments below a
		 * given threshold, we can make them free by checkpoint.
		 * Then, we secure free segments which don't need foreground
		 * GC any more.
		 */
		if (prefree_segments(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	if (!__get_victim(sbi, &segno, gc_type)) {
		ret = -ENODATA;
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC) {
		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
						sbi->skipped_gc_rwsem)
			skipped_round++;
		last_skipped = sbi->skipped_atomic_files[FG_GC];
		round++;
	}

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (sync)
		goto stop;

	if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
		if (skipped_round <= MAX_SKIP_GC_COUNT ||
					skipped_round * 2 < round) {
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (first_skipped < last_skipped &&
				(last_skipped - first_skipped) >
					sbi->skipped_gc_rwsem) {
			f2fs_drop_inmem_pages_all(sbi, true);
			segno = NULL_SEGNO;
			goto gc_more;
		}
		if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
			ret = f2fs_write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync && !ret)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}
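/*
 * Callers enter f2fs_gc() with sbi->gc_mutex held (the GC thread and
 * f2fs_balance_fs() both take it first); the mutex is dropped on the
 * stop path above. In sync mode a single victim is collected and
 * -EAGAIN is returned if no section could be freed.
 */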
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (sbi->s_ndevs && sbi->segs_per_sec == 1)
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}