/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current),
				msecs_to_jiffies(wait_ms));

		if (try_to_freeze())
			continue;
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

#ifdef CONFIG_F2FS_FAULT_INJECTION
		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}
#endif

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. The IO subsystem is idle, judged by the number of
		 *    writeback pages.
		 * 3. The IO subsystem is idle, judged by the number of
		 *    requests in the bdev's request list.
		 *
		 * Note) We have to avoid triggering GC too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions. So we wait a while to
		 * collect more dirty segments.
		 */
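		/*
		 * A note on pacing (descriptive, matching the logic below):
		 * wait_ms is nudged between gc_th->min_sleep_time and
		 * gc_th->max_sleep_time by increase_sleep_time() and
		 * decrease_sleep_time(), and jumps to gc_th->no_gc_sleep_time
		 * whenever no victim could be selected.
		 */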
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}
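/*
 * Policy dispatch (descriptive note): background GC defaults to
 * cost-benefit and foreground GC to greedy; the gc_idle knob (exposed
 * through the f2fs sysfs interface) can force cost-benefit (1) or
 * greedy (2) regardless of gc_type.
 */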
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select beginning hot/small space first */
	if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections
	 * selected by background GC before.
	 * Those sections are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;

		if (no_fggc_candidate(sbi, secno))
			continue;

		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
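/*
 * A worked example of the cost-benefit formula above (illustrative
 * numbers): u is the section's utilization in percent and age is its
 * normalized age (older sections, i.e. those whose mtime is close to
 * min_mtime, score near 100). With u = 20 and age = 80 the benefit is
 * 100 * (100 - 20) * 80 / (100 + 20) = 5333, so the returned cost is
 * UINT_MAX - 5333; cheaply cleanable, cold sections thus get the
 * smallest cost. This is an integer-scaled form of the classic LFS
 * cost-benefit ratio, (1 - u) * age / (1 + u).
 */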
static unsigned int get_greedy_cost(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	unsigned int valid_blocks =
			get_valid_blocks(sbi, segno, true);

	return IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
				valid_blocks * 2 : valid_blocks;
}

static unsigned int get_ssr_cost(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);

	return se->ckpt_valid_blocks > se->valid_blocks ?
				se->ckpt_valid_blocks : se->valid_blocks;
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_ssr_cost(sbi, segno);

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_greedy_cost(sbi, segno);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (*result != NULL_SEGNO) {
		if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
			get_valid_blocks(sbi, *result, false) &&
			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			p.min_segno = *result;
		goto out;
	}

	if (p.max_search == 0)
		goto out;

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;
		if (gc_type == FG_GC && p.alloc_mode == LFS &&
					no_fggc_candidate(sbi, secno))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] = last_victim + 1;
			else
				sm->last_victim[p.gc_mode] = segno + 1;
			sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};
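/*
 * Call-path note (descriptive): the GC path reaches this hook through
 * DIRTY_I(sbi)->v_ops->get_victim() with alloc_mode == LFS (see
 * __get_victim() below), while the SSR allocation path in segment.c
 * invokes the same hook with alloc_mode == SSR, which switches the
 * policy to greedy over single segments (see select_policy() and
 * get_ssr_cost() above).
 */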
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}
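/*
 * Phase overview for gc_node_segment() (descriptive): the segment is
 * scanned three times. Phase 0 reads ahead the NAT blocks of every
 * valid entry, phase 1 reads ahead the node pages themselves, and
 * phase 2 re-checks validity and migrates each node page via
 * move_node_page().
 */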
/*
 * This function compares the node address got from the summary with that
 * in the NAT. If it is valid, copy that node with cold status; otherwise
 * (an invalid node) ignore it.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;
}

/*
 * Calculate the start block index of the file area covered by the given
 * node offset. Be careful: the caller must pass a node offset that
 * indicates a direct node block. Passing an offset that points to any
 * other type of node block, such as an indirect or double indirect node
 * block, is a caller's bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}
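/*
 * Worked example for start_bidx_of_node() (illustrative): node_ofs 1
 * and 2 are the two plain direct nodes (bidx 0 and 1). node_ofs 3 is
 * the first indirect node, so the first direct node hanging off it sits
 * at node_ofs 4: dec = (4 - 4) / (NIDS_PER_BLOCK + 1) = 0 and
 * bidx = 4 - 2 - 0 = 2. That node therefore covers file blocks starting
 * at 2 * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode), i.e. right after the
 * inode's inline addresses and the first two direct nodes.
 */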
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: valid data with mismatched node version.",
				__func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

static void move_encrypted_block(struct inode *inode, block_t bidx,
					unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode))
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until previous
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, CURSEG_COLD_DATA, NULL, false);

	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}

static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
					unsigned int segno, int off)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode))
		goto out;

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = do_write_data_page(&fio);
		if (err == -ENOMEM && is_dirty) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
out:
	f2fs_put_page(page, 1);
}
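/*
 * Phase overview for gc_data_segment() (descriptive): the segment is
 * scanned five times. Phase 0 reads ahead NAT blocks and phase 1 the
 * node pages, phase 2 verifies liveness via is_alive() and reads ahead
 * the owning inodes, phase 3 grabs the inodes and reads ahead (or, for
 * encrypted regular files, just queues) the data pages, and phase 4
 * performs the actual move.
 */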
/*
 * This function tries to get the parent node of a victim data block and
 * checks the validity of the data block. If the block is valid, it is
 * copied with cold status and the parent node is modified.
 * If the parent node is not valid or the data block address is
 * different, the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if encrypted inode, let's defer it to phase 4 */
			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, inode);
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, REQ_RAHEAD,
					true);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->dio_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->dio_rwsem[WRITE])) {
					up_write(&fi->dio_rwsem[READ]);
					continue;
				}
				locked = true;
			}

			start_bidx = start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_encrypted_inode(inode) &&
						S_ISREG(inode->i_mode))
				move_encrypted_block(inode, start_bidx,
								segno, off);
			else
				move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (locked) {
				up_write(&fi->dio_rwsem[WRITE]);
				up_write(&fi->dio_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					NO_CHECK_TYPE, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}
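/*
 * Scope note (descriptive): do_garbage_collect() below always works on a
 * whole section, i.e. sbi->segs_per_sec consecutive segments starting at
 * start_segno, so cleaning frees space at the allocation granularity; it
 * returns 1 only when FG_GC managed to empty the section.
 */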
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int sec_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* readahead multiple SSA blocks, which have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0 ||
				!PageUptodate(sum_page) ||
				unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - mutex_lock(sentry_lock)
		 *   - mutex_lock(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	if (gc_type == FG_GC &&
		get_valid_blocks(sbi, start_segno, true) == 0)
		sec_freed = 1;

	stat_inc_call_count(sbi->stat_info);

	return sec_freed;
}
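/*
 * Entry-point note (descriptive): f2fs_gc() is entered with
 * sbi->gc_mutex held and drops it on the stop path. sync == true forces
 * FG_GC and turns the return value into -EAGAIN when no section could be
 * freed, while background callers may loop back to gc_more until enough
 * free sections exist.
 */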
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0;
	int ret;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree segments below the
		 * given threshold, we can make them free by checkpoint.
		 * Then, we secure free segments which no longer need FG_GC.
		 */
		if (prefree_segments(sbi)) {
			ret = write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	ret = -EINVAL;
	/* f2fs_balance_fs doesn't need to do BG_GC in the critical path */
	if (gc_type == BG_GC && !background)
		goto stop;
	if (!__get_victim(sbi, &segno, gc_type))
		goto stop;
	ret = 0;

	if (do_garbage_collect(sbi, segno, &gc_list, gc_type) &&
			gc_type == FG_GC)
		sec_freed++;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (gc_type == FG_GC)
			ret = write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	u64 main_count, resv_count, ovp_count;

	DIRTY_I(sbi)->v_ops = &default_v_ops;

	/* threshold of # of valid blocks in a section for victims of FG_GC */
	main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg;
	resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg;
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;

	sbi->fggc_threshold = div64_u64((main_count - ovp_count) *
				BLKS_PER_SEC(sbi), (main_count - resv_count));

	/* give warm/cold data area from slower device */
	if (sbi->s_ndevs && sbi->segs_per_sec == 1)
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}