gc.c (9a87ffc99ec8eb8d35eed7c4f816d75f5cc9662e, old) | gc.c (b62e71be2110d8b52bf5faf3c3ed7ca1a0c113a5, new) |
---|---|
1// SPDX-License-Identifier: GPL-2.0 2/* 3 * fs/f2fs/gc.c 4 * 5 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 6 * http://www.samsung.com/ 7 */ 8#include <linux/fs.h> --- 45 unchanged lines hidden (view full) --- 54 55 if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq)) 56 foreground = true; 57 58 /* give it a try one time */ 59 if (gc_th->gc_wake) 60 gc_th->gc_wake = false; 61 | 1// SPDX-License-Identifier: GPL-2.0 2/* 3 * fs/f2fs/gc.c 4 * 5 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 6 * http://www.samsung.com/ 7 */ 8#include <linux/fs.h> --- 45 unchanged lines hidden (view full) --- 54 55 if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq)) 56 foreground = true; 57 58 /* give it a try one time */ 59 if (gc_th->gc_wake) 60 gc_th->gc_wake = false; 61 |
62 if (try_to_freeze()) { | 62 if (try_to_freeze() || f2fs_readonly(sbi->sb)) { |
63 stat_other_skip_bggc_count(sbi); 64 continue; 65 } 66 if (kthread_should_stop()) 67 break; 68 69 if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) { 70 increase_sleep_time(gc_th, &wait_ms); --- 314 unchanged lines hidden (view full) --- 385 386 while (offset < end) { 387 if (test_bit(offset++, addr)) 388 ++sum; 389 } 390 return sum; 391} 392 | 63 stat_other_skip_bggc_count(sbi); 64 continue; 65 } 66 if (kthread_should_stop()) 67 break; 68 69 if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) { 70 increase_sleep_time(gc_th, &wait_ms); --- 314 unchanged lines hidden (view full) --- 385 386 while (offset < end) { 387 if (test_bit(offset++, addr)) 388 ++sum; 389 } 390 return sum; 391} 392 |
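The hunk above tightens the background GC thread's skip condition: besides the freezer check, a round is now also skipped when the filesystem is mounted read-only, where there is nothing useful for background GC to do. A minimal sketch of the loop shape (only the skip logic; waitqueue/timeout handling and the rest of gc_thread_func() are elided):

```c
/* Simplified shape of the gc_thread_func() loop after this change. */
do {
        /* ... sleep on the GC waitqueue with a timeout ... */

        /* nothing to do while frozen or on a read-only mount */
        if (try_to_freeze() || f2fs_readonly(sbi->sb)) {
                stat_other_skip_bggc_count(sbi);
                continue;
        }
        if (kthread_should_stop())
                break;

        /* ... run one background (or merged foreground) GC pass ... */
} while (!kthread_should_stop());
```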
393static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi, 394 unsigned long long mtime, unsigned int segno, 395 struct rb_node *parent, struct rb_node **p, 396 bool left_most) | 393static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi, 394 struct rb_root_cached *root) |
397{ | 395{ |
396#ifdef CONFIG_F2FS_CHECK_FS 397 struct rb_node *cur = rb_first_cached(root), *next; 398 struct victim_entry *cur_ve, *next_ve; 399 400 while (cur) { 401 next = rb_next(cur); 402 if (!next) 403 return true; 404 405 cur_ve = rb_entry(cur, struct victim_entry, rb_node); 406 next_ve = rb_entry(next, struct victim_entry, rb_node); 407 408 if (cur_ve->mtime > next_ve->mtime) { 409 f2fs_info(sbi, "broken victim_rbtree, " 410 "cur_mtime(%llu) next_mtime(%llu)", 411 cur_ve->mtime, next_ve->mtime); 412 return false; 413 } 414 cur = next; 415 } 416#endif 417 return true; 418} 419 420static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi, 421 unsigned long long mtime) 422{ |
|
398 struct atgc_management *am = &sbi->am; | 423 struct atgc_management *am = &sbi->am; |
424 struct rb_node *node = am->root.rb_root.rb_node; 425 struct victim_entry *ve = NULL; 426 427 while (node) { 428 ve = rb_entry(node, struct victim_entry, rb_node); 429 430 if (mtime < ve->mtime) 431 node = node->rb_left; 432 else 433 node = node->rb_right; 434 } 435 return ve; 436} 437 438static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi, 439 unsigned long long mtime, unsigned int segno) 440{ 441 struct atgc_management *am = &sbi->am; |
|
399 struct victim_entry *ve; 400 | 442 struct victim_entry *ve; 443 |
401 ve = f2fs_kmem_cache_alloc(victim_entry_slab, 402 GFP_NOFS, true, NULL); | 444 ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL); |
403 404 ve->mtime = mtime; 405 ve->segno = segno; 406 | 445 446 ve->mtime = mtime; 447 ve->segno = segno; 448 |
407 rb_link_node(&ve->rb_node, parent, p); 408 rb_insert_color_cached(&ve->rb_node, &am->root, left_most); 409 | |
410 list_add_tail(&ve->list, &am->victim_list); | 449 list_add_tail(&ve->list, &am->victim_list); |
411 | |
412 am->victim_count++; 413 414 return ve; 415} 416 | 450 am->victim_count++; 451 452 return ve; 453} 454 |
417static void insert_victim_entry(struct f2fs_sb_info *sbi, | 455static void __insert_victim_entry(struct f2fs_sb_info *sbi, |
418 unsigned long long mtime, unsigned int segno) 419{ 420 struct atgc_management *am = &sbi->am; | 456 unsigned long long mtime, unsigned int segno) 457{ 458 struct atgc_management *am = &sbi->am; |
421 struct rb_node **p; | 459 struct rb_root_cached *root = &am->root; 460 struct rb_node **p = &root->rb_root.rb_node; |
422 struct rb_node *parent = NULL; | 461 struct rb_node *parent = NULL; |
462 struct victim_entry *ve; |
|
423 bool left_most = true; 424 | 463 bool left_most = true; 464 |
425 p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most); 426 attach_victim_entry(sbi, mtime, segno, parent, p, left_most); | 465 /* look up rb tree to find parent node */ 466 while (*p) { 467 parent = *p; 468 ve = rb_entry(parent, struct victim_entry, rb_node); 469 470 if (mtime < ve->mtime) { 471 p = &(*p)->rb_left; 472 } else { 473 p = &(*p)->rb_right; 474 left_most = false; 475 } 476 } 477 478 ve = __create_victim_entry(sbi, mtime, segno); 479 480 rb_link_node(&ve->rb_node, parent, p); 481 rb_insert_color_cached(&ve->rb_node, root, left_most); |
427} 428 429static void add_victim_entry(struct f2fs_sb_info *sbi, 430 struct victim_sel_policy *p, unsigned int segno) 431{ 432 struct sit_info *sit_i = SIT_I(sbi); 433 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); 434 unsigned int start = GET_SEG_FROM_SEC(sbi, secno); --- 19 unchanged lines hidden (view full) --- 454 sit_i->dirty_min_mtime = mtime; 455 if (mtime > sit_i->dirty_max_mtime) 456 sit_i->dirty_max_mtime = mtime; 457 458 /* don't choose young section as candidate */ 459 if (sit_i->dirty_max_mtime - mtime < p->age_threshold) 460 return; 461 | 482} 483 484static void add_victim_entry(struct f2fs_sb_info *sbi, 485 struct victim_sel_policy *p, unsigned int segno) 486{ 487 struct sit_info *sit_i = SIT_I(sbi); 488 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); 489 unsigned int start = GET_SEG_FROM_SEC(sbi, secno); --- 19 unchanged lines hidden (view full) --- 509 sit_i->dirty_min_mtime = mtime; 510 if (mtime > sit_i->dirty_max_mtime) 511 sit_i->dirty_max_mtime = mtime; 512 513 /* don't choose young section as candidate */ 514 if (sit_i->dirty_max_mtime - mtime < p->age_threshold) 515 return; 516 |
462 insert_victim_entry(sbi, mtime, segno); | 517 __insert_victim_entry(sbi, mtime, segno); |
463} 464 | 518} 519 |
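The hunks above replace the generic rb-tree helpers (f2fs_lookup_rb_tree_ext() plus attach_victim_entry()) with victim-specific ones: __lookup_victim_entry() walks the tree by mtime, __create_victim_entry() only allocates and lists the entry, and __insert_victim_entry() open-codes the usual Linux cached-rbtree insertion keyed by segment mtime, with ties going right. A condensed, hypothetical helper mirroring that pattern (victim_entry trimmed to the fields the diff shows):

```c
#include <linux/rbtree.h>
#include <linux/list.h>

struct victim_entry {
        struct rb_node rb_node;         /* keyed by mtime */
        unsigned long long mtime;
        unsigned int segno;
        struct list_head list;
};

/* Insert @ve into a cached rbtree ordered by mtime; duplicates go right. */
static void victim_rb_insert(struct rb_root_cached *root,
                             struct victim_entry *ve)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent = NULL;
        bool leftmost = true;

        while (*p) {
                struct victim_entry *cur;

                parent = *p;
                cur = rb_entry(parent, struct victim_entry, rb_node);

                if (ve->mtime < cur->mtime) {
                        p = &(*p)->rb_left;
                } else {
                        p = &(*p)->rb_right;
                        leftmost = false;       /* not the new minimum */
                }
        }

        rb_link_node(&ve->rb_node, parent, p);
        rb_insert_color_cached(&ve->rb_node, root, leftmost);
}
```

Tracking the leftmost flag keeps rb_first_cached() O(1), which is where atgc_lookup_victim() starts its age scan.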
465static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi, 466 struct victim_sel_policy *p) 467{ 468 struct atgc_management *am = &sbi->am; 469 struct rb_node *parent = NULL; 470 bool left_most; 471 472 f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most); 473 474 return parent; 475} 476 | |
477static void atgc_lookup_victim(struct f2fs_sb_info *sbi, 478 struct victim_sel_policy *p) 479{ 480 struct sit_info *sit_i = SIT_I(sbi); 481 struct atgc_management *am = &sbi->am; 482 struct rb_root_cached *root = &am->root; 483 struct rb_node *node; | 520static void atgc_lookup_victim(struct f2fs_sb_info *sbi, 521 struct victim_sel_policy *p) 522{ 523 struct sit_info *sit_i = SIT_I(sbi); 524 struct atgc_management *am = &sbi->am; 525 struct rb_root_cached *root = &am->root; 526 struct rb_node *node; |
484 struct rb_entry *re; | |
485 struct victim_entry *ve; 486 unsigned long long total_time; 487 unsigned long long age, u, accu; 488 unsigned long long max_mtime = sit_i->dirty_max_mtime; 489 unsigned long long min_mtime = sit_i->dirty_min_mtime; 490 unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi); 491 unsigned int vblocks; 492 unsigned int dirty_threshold = max(am->max_candidate_count, --- 10 unchanged lines hidden (view full) --- 503 total_time = max_mtime - min_mtime; 504 505 accu = div64_u64(ULLONG_MAX, total_time); 506 accu = min_t(unsigned long long, div_u64(accu, 100), 507 DEFAULT_ACCURACY_CLASS); 508 509 node = rb_first_cached(root); 510next: | 527 struct victim_entry *ve; 528 unsigned long long total_time; 529 unsigned long long age, u, accu; 530 unsigned long long max_mtime = sit_i->dirty_max_mtime; 531 unsigned long long min_mtime = sit_i->dirty_min_mtime; 532 unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi); 533 unsigned int vblocks; 534 unsigned int dirty_threshold = max(am->max_candidate_count, --- 10 unchanged lines hidden (view full) --- 545 total_time = max_mtime - min_mtime; 546 547 accu = div64_u64(ULLONG_MAX, total_time); 548 accu = min_t(unsigned long long, div_u64(accu, 100), 549 DEFAULT_ACCURACY_CLASS); 550 551 node = rb_first_cached(root); 552next: |
511 re = rb_entry_safe(node, struct rb_entry, rb_node); 512 if (!re) | 553 ve = rb_entry_safe(node, struct victim_entry, rb_node); 554 if (!ve) |
513 return; 514 | 555 return; 556 |
515 ve = (struct victim_entry *)re; 516 | |
517 if (ve->mtime >= max_mtime || ve->mtime < min_mtime) 518 goto skip; 519 520 /* age = 10000 * x% * 60 */ 521 age = div64_u64(accu * (max_mtime - ve->mtime), total_time) * 522 age_weight; 523 524 vblocks = get_valid_blocks(sbi, ve->segno, true); --- 25 unchanged lines hidden (view full) --- 550 * select candidates around source section in range of 551 * [target - dirty_threshold, target + dirty_threshold] 552 */ 553static void atssr_lookup_victim(struct f2fs_sb_info *sbi, 554 struct victim_sel_policy *p) 555{ 556 struct sit_info *sit_i = SIT_I(sbi); 557 struct atgc_management *am = &sbi->am; | 557 if (ve->mtime >= max_mtime || ve->mtime < min_mtime) 558 goto skip; 559 560 /* age = 10000 * x% * 60 */ 561 age = div64_u64(accu * (max_mtime - ve->mtime), total_time) * 562 age_weight; 563 564 vblocks = get_valid_blocks(sbi, ve->segno, true); --- 25 unchanged lines hidden (view full) --- 590 * select candidates around source section in range of 591 * [target - dirty_threshold, target + dirty_threshold] 592 */ 593static void atssr_lookup_victim(struct f2fs_sb_info *sbi, 594 struct victim_sel_policy *p) 595{ 596 struct sit_info *sit_i = SIT_I(sbi); 597 struct atgc_management *am = &sbi->am; |
558 struct rb_node *node; 559 struct rb_entry *re; | |
560 struct victim_entry *ve; 561 unsigned long long age; 562 unsigned long long max_mtime = sit_i->dirty_max_mtime; 563 unsigned long long min_mtime = sit_i->dirty_min_mtime; 564 unsigned int seg_blocks = sbi->blocks_per_seg; 565 unsigned int vblocks; 566 unsigned int dirty_threshold = max(am->max_candidate_count, 567 am->candidate_ratio * 568 am->victim_count / 100); | 598 struct victim_entry *ve; 599 unsigned long long age; 600 unsigned long long max_mtime = sit_i->dirty_max_mtime; 601 unsigned long long min_mtime = sit_i->dirty_min_mtime; 602 unsigned int seg_blocks = sbi->blocks_per_seg; 603 unsigned int vblocks; 604 unsigned int dirty_threshold = max(am->max_candidate_count, 605 am->candidate_ratio * 606 am->victim_count / 100); |
569 unsigned int cost; 570 unsigned int iter = 0; | 607 unsigned int cost, iter; |
571 int stage = 0; 572 573 if (max_mtime < min_mtime) 574 return; 575 max_mtime += 1; 576next_stage: | 608 int stage = 0; 609 610 if (max_mtime < min_mtime) 611 return; 612 max_mtime += 1; 613next_stage: |
577 node = lookup_central_victim(sbi, p); | 614 iter = 0; 615 ve = __lookup_victim_entry(sbi, p->age); |
578next_node: | 616next_node: |
579 re = rb_entry_safe(node, struct rb_entry, rb_node); 580 if (!re) { 581 if (stage == 0) 582 goto skip_stage; | 617 if (!ve) { 618 if (stage++ == 0) 619 goto next_stage; |
583 return; 584 } 585 | 620 return; 621 } 622 |
586 ve = (struct victim_entry *)re; 587 | |
588 if (ve->mtime >= max_mtime || ve->mtime < min_mtime) 589 goto skip_node; 590 591 age = max_mtime - ve->mtime; 592 593 vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks; 594 f2fs_bug_on(sbi, !vblocks); 595 --- 9 unchanged lines hidden (view full) --- 605 if (cost < p->min_cost || 606 (cost == p->min_cost && age > p->oldest_age)) { 607 p->min_cost = cost; 608 p->oldest_age = age; 609 p->min_segno = ve->segno; 610 } 611skip_node: 612 if (iter < dirty_threshold) { | 623 if (ve->mtime >= max_mtime || ve->mtime < min_mtime) 624 goto skip_node; 625 626 age = max_mtime - ve->mtime; 627 628 vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks; 629 f2fs_bug_on(sbi, !vblocks); 630 --- 9 unchanged lines hidden (view full) --- 640 if (cost < p->min_cost || 641 (cost == p->min_cost && age > p->oldest_age)) { 642 p->min_cost = cost; 643 p->oldest_age = age; 644 p->min_segno = ve->segno; 645 } 646skip_node: 647 if (iter < dirty_threshold) { |
613 if (stage == 0) 614 node = rb_prev(node); 615 else if (stage == 1) 616 node = rb_next(node); | 648 ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) : 649 rb_next(&ve->rb_node), 650 struct victim_entry, rb_node); |
617 goto next_node; 618 } | 651 goto next_node; 652 } |
619skip_stage: 620 if (stage < 1) { 621 stage++; 622 iter = 0; | 653 654 if (stage++ == 0) |
623 goto next_stage; | 655 goto next_stage; |
624 } | |
625} | 656} |
657 |
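With struct rb_entry gone, the lookup paths above embed rb_node directly in struct victim_entry and convert with rb_entry()/rb_entry_safe() instead of casting through the intermediate type. The AT_SSR path also resets its iteration counter at the top of every stage and folds the two-stage control flow into `if (stage++ == 0) goto next_stage;`. The traversal works roughly like this hypothetical sketch (reusing the victim_entry layout from the sketch above; not the exact kernel code):

```c
#include <linux/rbtree.h>

/* Walk outward from a starting entry: stage 0 moves towards older entries
 * (rb_prev), stage 1 towards younger ones (rb_next), visiting at most
 * @budget entries per stage.  rb_entry_safe() maps a NULL rb_node to a
 * NULL victim_entry, so hitting the end of the tree ends the stage.
 */
static void scan_around(struct victim_entry *start, unsigned int budget)
{
        int stage;

        for (stage = 0; stage < 2; stage++) {
                struct victim_entry *ve = start;
                unsigned int iter = 0;

                while (ve && iter++ < budget) {
                        /* ... evaluate cost/age of ve->segno here ... */

                        ve = rb_entry_safe(stage == 0 ?
                                           rb_prev(&ve->rb_node) :
                                           rb_next(&ve->rb_node),
                                           struct victim_entry, rb_node);
                }
        }
}
```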
|
626static void lookup_victim_by_age(struct f2fs_sb_info *sbi, 627 struct victim_sel_policy *p) 628{ | 658static void lookup_victim_by_age(struct f2fs_sb_info *sbi, 659 struct victim_sel_policy *p) 660{ |
629 f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi, 630 &sbi->am.root, true)); | 661 f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root)); |
631 632 if (p->gc_mode == GC_AT) 633 atgc_lookup_victim(sbi, p); 634 else if (p->alloc_mode == AT_SSR) 635 atssr_lookup_victim(sbi, p); 636 else 637 f2fs_bug_on(sbi, 1); 638} --- 66 unchanged lines hidden (view full) --- 705/* 706 * This function is called from two paths. 707 * One is garbage collection and the other is SSR segment selection. 708 * When it is called during GC, it just gets a victim segment 709 * and it does not remove it from dirty seglist. 710 * When it is called from SSR segment selection, it finds a segment 711 * which has minimum valid blocks and removes it from dirty seglist. 712 */ | 662 663 if (p->gc_mode == GC_AT) 664 atgc_lookup_victim(sbi, p); 665 else if (p->alloc_mode == AT_SSR) 666 atssr_lookup_victim(sbi, p); 667 else 668 f2fs_bug_on(sbi, 1); 669} --- 66 unchanged lines hidden (view full) --- 736/* 737 * This function is called from two paths. 738 * One is garbage collection and the other is SSR segment selection. 739 * When it is called during GC, it just gets a victim segment 740 * and it does not remove it from dirty seglist. 741 * When it is called from SSR segment selection, it finds a segment 742 * which has minimum valid blocks and removes it from dirty seglist. 743 */ |
713static int get_victim_by_default(struct f2fs_sb_info *sbi, 714 unsigned int *result, int gc_type, int type, 715 char alloc_mode, unsigned long long age) | 744int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result, 745 int gc_type, int type, char alloc_mode, 746 unsigned long long age) |
716{ 717 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 718 struct sit_info *sm = SIT_I(sbi); 719 struct victim_sel_policy p; 720 unsigned int secno, last_victim; 721 unsigned int last_segment; 722 unsigned int nsearched; 723 bool is_atgc; --- 177 unchanged lines hidden (view full) --- 901 trace_f2fs_get_victim(sbi->sb, type, gc_type, &p, 902 sbi->cur_victim_sec, 903 prefree_segments(sbi), free_segments(sbi)); 904 mutex_unlock(&dirty_i->seglist_lock); 905 906 return ret; 907} 908 | 747{ 748 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 749 struct sit_info *sm = SIT_I(sbi); 750 struct victim_sel_policy p; 751 unsigned int secno, last_victim; 752 unsigned int last_segment; 753 unsigned int nsearched; 754 bool is_atgc; --- 177 unchanged lines hidden (view full) --- 932 trace_f2fs_get_victim(sbi->sb, type, gc_type, &p, 933 sbi->cur_victim_sec, 934 prefree_segments(sbi), free_segments(sbi)); 935 mutex_unlock(&dirty_i->seglist_lock); 936 937 return ret; 938} 939 |
909static const struct victim_selection default_v_ops = { 910 .get_victim = get_victim_by_default, 911}; 912 | |
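get_victim_by_default() is renamed to f2fs_get_victim() and called directly, and the struct victim_selection ops table (default_v_ops) is deleted; it only ever had this one in-tree implementation, so DIRTY_I(sbi)->v_ops->get_victim() was pure indirection. Callers change mechanically, e.g. (condensed from the __get_victim() hunk further down):

```c
/* before: one indirect call through the single-entry ops table */
ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
                                      NO_CHECK_TYPE, LFS, 0);

/* after: a plain direct call */
ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE, LFS, 0);
```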
913static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino) 914{ 915 struct inode_entry *ie; 916 917 ie = radix_tree_lookup(&gc_list->iroot, ino); 918 if (ie) 919 return ie->inode; 920 return NULL; --- 663 unchanged lines hidden (view full) --- 1584 /* phase 4 */ 1585 inode = find_gc_inode(gc_list, dni.ino); 1586 if (inode) { 1587 struct f2fs_inode_info *fi = F2FS_I(inode); 1588 bool locked = false; 1589 int err; 1590 1591 if (S_ISREG(inode->i_mode)) { | 940static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino) 941{ 942 struct inode_entry *ie; 943 944 ie = radix_tree_lookup(&gc_list->iroot, ino); 945 if (ie) 946 return ie->inode; 947 return NULL; --- 663 unchanged lines hidden (view full) --- 1611 /* phase 4 */ 1612 inode = find_gc_inode(gc_list, dni.ino); 1613 if (inode) { 1614 struct f2fs_inode_info *fi = F2FS_I(inode); 1615 bool locked = false; 1616 int err; 1617 1618 if (S_ISREG(inode->i_mode)) { |
1592 if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[READ])) { | 1619 if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) { |
1593 sbi->skipped_gc_rwsem++; 1594 continue; 1595 } 1596 if (!f2fs_down_write_trylock( | 1620 sbi->skipped_gc_rwsem++; 1621 continue; 1622 } 1623 if (!f2fs_down_write_trylock( |
1597 &fi->i_gc_rwsem[WRITE])) { | 1624 &fi->i_gc_rwsem[READ])) { |
1598 sbi->skipped_gc_rwsem++; | 1625 sbi->skipped_gc_rwsem++; |
1599 f2fs_up_write(&fi->i_gc_rwsem[READ]); | 1626 f2fs_up_write(&fi->i_gc_rwsem[WRITE]); |
1600 continue; 1601 } 1602 locked = true; 1603 1604 /* wait for all inflight aio data */ 1605 inode_dio_wait(inode); 1606 } 1607 --- 6 unchanged lines hidden (view full) --- 1614 err = move_data_page(inode, start_bidx, gc_type, 1615 segno, off); 1616 1617 if (!err && (gc_type == FG_GC || 1618 f2fs_post_read_required(inode))) 1619 submitted++; 1620 1621 if (locked) { | 1627 continue; 1628 } 1629 locked = true; 1630 1631 /* wait for all inflight aio data */ 1632 inode_dio_wait(inode); 1633 } 1634 --- 6 unchanged lines hidden (view full) --- 1641 err = move_data_page(inode, start_bidx, gc_type, 1642 segno, off); 1643 1644 if (!err && (gc_type == FG_GC || 1645 f2fs_post_read_required(inode))) 1646 submitted++; 1647 1648 if (locked) { |
1622 f2fs_up_write(&fi->i_gc_rwsem[WRITE]); | |
1623 f2fs_up_write(&fi->i_gc_rwsem[READ]); | 1649 f2fs_up_write(&fi->i_gc_rwsem[READ]); |
1650 f2fs_up_write(&fi->i_gc_rwsem[WRITE]); |
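In gc_data_segment(), the order in which the two per-inode GC semaphores are taken is reversed: the mover now trylocks i_gc_rwsem[WRITE] first, then i_gc_rwsem[READ], and releases them in the opposite order, presumably to match the order used by other paths that take both semaphores, since a single consistent order is what rules out ABBA deadlocks. The pattern in isolation (same f2fs helpers as the diff; skip accounting omitted):

```c
/* Lock-ordering sketch: WRITE first, then READ, release in reverse. */
if (f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) {
        if (f2fs_down_write_trylock(&fi->i_gc_rwsem[READ])) {
                inode_dio_wait(inode);          /* drain in-flight AIO/DIO */
                /* ... move the data block ... */
                f2fs_up_write(&fi->i_gc_rwsem[READ]);
        }
        f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
}
```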
|
1624 } 1625 1626 stat_inc_data_blk_count(sbi, 1, gc_type); 1627 } 1628 } 1629 1630 if (++phase < 5) 1631 goto next_step; 1632 1633 return submitted; 1634} 1635 1636static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim, 1637 int gc_type) 1638{ 1639 struct sit_info *sit_i = SIT_I(sbi); 1640 int ret; 1641 1642 down_write(&sit_i->sentry_lock); | 1651 } 1652 1653 stat_inc_data_blk_count(sbi, 1, gc_type); 1654 } 1655 } 1656 1657 if (++phase < 5) 1658 goto next_step; 1659 1660 return submitted; 1661} 1662 1663static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim, 1664 int gc_type) 1665{ 1666 struct sit_info *sit_i = SIT_I(sbi); 1667 int ret; 1668 1669 down_write(&sit_i->sentry_lock); |
1643 ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, 1644 NO_CHECK_TYPE, LFS, 0); | 1670 ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE, LFS, 0); |
1645 up_write(&sit_i->sentry_lock); 1646 return ret; 1647} 1648 1649static int do_garbage_collect(struct f2fs_sb_info *sbi, 1650 unsigned int start_segno, 1651 struct gc_inode_list *gc_list, int gc_type, 1652 bool force_migrate) --- 121 unchanged lines hidden (view full) --- 1774 int sec_freed = 0, seg_freed = 0, total_freed = 0; 1775 int ret = 0; 1776 struct cp_control cpc; 1777 struct gc_inode_list gc_list = { 1778 .ilist = LIST_HEAD_INIT(gc_list.ilist), 1779 .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS), 1780 }; 1781 unsigned int skipped_round = 0, round = 0; | 1671 up_write(&sit_i->sentry_lock); 1672 return ret; 1673} 1674 1675static int do_garbage_collect(struct f2fs_sb_info *sbi, 1676 unsigned int start_segno, 1677 struct gc_inode_list *gc_list, int gc_type, 1678 bool force_migrate) --- 121 unchanged lines hidden (view full) --- 1800 int sec_freed = 0, seg_freed = 0, total_freed = 0; 1801 int ret = 0; 1802 struct cp_control cpc; 1803 struct gc_inode_list gc_list = { 1804 .ilist = LIST_HEAD_INIT(gc_list.ilist), 1805 .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS), 1806 }; 1807 unsigned int skipped_round = 0, round = 0; |
1808 unsigned int upper_secs; |
|
1782 1783 trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc, 1784 gc_control->nr_free_secs, 1785 get_pages(sbi, F2FS_DIRTY_NODES), 1786 get_pages(sbi, F2FS_DIRTY_DENTS), 1787 get_pages(sbi, F2FS_DIRTY_IMETA), 1788 free_sections(sbi), 1789 free_segments(sbi), 1790 reserved_segments(sbi), 1791 prefree_segments(sbi)); 1792 1793 cpc.reason = __get_cp_reason(sbi); | 1809 1810 trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc, 1811 gc_control->nr_free_secs, 1812 get_pages(sbi, F2FS_DIRTY_NODES), 1813 get_pages(sbi, F2FS_DIRTY_DENTS), 1814 get_pages(sbi, F2FS_DIRTY_IMETA), 1815 free_sections(sbi), 1816 free_segments(sbi), 1817 reserved_segments(sbi), 1818 prefree_segments(sbi)); 1819 1820 cpc.reason = __get_cp_reason(sbi); |
1794 sbi->skipped_gc_rwsem = 0; | |
1795gc_more: | 1821gc_more: |
1822 sbi->skipped_gc_rwsem = 0; |
|
1796 if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) { 1797 ret = -EINVAL; 1798 goto stop; 1799 } 1800 if (unlikely(f2fs_cp_error(sbi))) { 1801 ret = -EIO; 1802 goto stop; 1803 } 1804 | 1823 if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) { 1824 ret = -EINVAL; 1825 goto stop; 1826 } 1827 if (unlikely(f2fs_cp_error(sbi))) { 1828 ret = -EIO; 1829 goto stop; 1830 } 1831 |
1805 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) { | 1832 /* Let's run FG_GC, if we don't have enough space. */ 1833 if (has_not_enough_free_secs(sbi, 0, 0)) { 1834 gc_type = FG_GC; 1835 |
1806 /* 1807 * For example, if there are many prefree_segments below given 1808 * threshold, we can make them free by checkpoint. Then, we 1809 * secure free segments which doesn't need fggc any more. 1810 */ 1811 if (prefree_segments(sbi)) { 1812 ret = f2fs_write_checkpoint(sbi, &cpc); 1813 if (ret) 1814 goto stop; 1815 } | 1836 /* 1837 * For example, if there are many prefree_segments below given 1838 * threshold, we can make them free by checkpoint. Then, we 1839 * secure free segments which doesn't need fggc any more. 1840 */ 1841 if (prefree_segments(sbi)) { 1842 ret = f2fs_write_checkpoint(sbi, &cpc); 1843 if (ret) 1844 goto stop; 1845 } |
1816 if (has_not_enough_free_secs(sbi, 0, 0)) 1817 gc_type = FG_GC; | |
1818 } 1819 1820 /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */ 1821 if (gc_type == BG_GC && gc_control->no_bg_gc) { 1822 ret = -EINVAL; 1823 goto stop; 1824 } 1825retry: --- 10 unchanged lines hidden (view full) --- 1836 1837 seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, 1838 gc_control->should_migrate_blocks); 1839 total_freed += seg_freed; 1840 1841 if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) 1842 sec_freed++; 1843 | 1846 } 1847 1848 /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */ 1849 if (gc_type == BG_GC && gc_control->no_bg_gc) { 1850 ret = -EINVAL; 1851 goto stop; 1852 } 1853retry: --- 10 unchanged lines hidden (view full) --- 1864 1865 seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, 1866 gc_control->should_migrate_blocks); 1867 total_freed += seg_freed; 1868 1869 if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) 1870 sec_freed++; 1871 |
1844 if (gc_type == FG_GC) | 1872 if (gc_type == FG_GC) { |
1845 sbi->cur_victim_sec = NULL_SEGNO; 1846 | 1873 sbi->cur_victim_sec = NULL_SEGNO; 1874 |
1847 if (gc_control->init_gc_type == FG_GC || 1848 !has_not_enough_free_secs(sbi, 1849 (gc_type == FG_GC) ? sec_freed : 0, 0)) { 1850 if (gc_type == FG_GC && sec_freed < gc_control->nr_free_secs) 1851 goto go_gc_more; 1852 goto stop; 1853 } 1854 1855 /* FG_GC stops GC by skip_count */ 1856 if (gc_type == FG_GC) { | 1875 if (has_enough_free_secs(sbi, sec_freed, 0)) { 1876 if (!gc_control->no_bg_gc && 1877 sec_freed < gc_control->nr_free_secs) 1878 goto go_gc_more; 1879 goto stop; 1880 } |
1857 if (sbi->skipped_gc_rwsem) 1858 skipped_round++; 1859 round++; 1860 if (skipped_round > MAX_SKIP_GC_COUNT && 1861 skipped_round * 2 >= round) { 1862 ret = f2fs_write_checkpoint(sbi, &cpc); 1863 goto stop; 1864 } | 1881 if (sbi->skipped_gc_rwsem) 1882 skipped_round++; 1883 round++; 1884 if (skipped_round > MAX_SKIP_GC_COUNT && 1885 skipped_round * 2 >= round) { 1886 ret = f2fs_write_checkpoint(sbi, &cpc); 1887 goto stop; 1888 } |
1889 } else if (has_enough_free_secs(sbi, 0, 0)) { 1890 goto stop; |
|
1865 } 1866 | 1891 } 1892 |
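The f2fs_gc() rework above changes when a pass escalates to foreground GC and when it stops: skipped_gc_rwsem is reset on every gc_more iteration instead of once per call, any pass is upgraded to FG_GC as soon as free sections run short (not only BG_GC requests), and the stop/continue decisions go through has_enough_free_secs(); the checkpoint condition just below then compares free sections against the writer's requirement from __get_secs_required() plus NR_GC_CHECKPOINT_SECS. A rough control-flow sketch of the reshaped loop (error handling, locking, tracing and several details omitted; helper names as in the diff):

```c
gc_more:
        sbi->skipped_gc_rwsem = 0;              /* now reset on every pass */

        /* escalate to foreground GC whenever free sections run short */
        if (has_not_enough_free_secs(sbi, 0, 0)) {
                gc_type = FG_GC;
                if (prefree_segments(sbi))
                        ret = f2fs_write_checkpoint(sbi, &cpc);
        }

        seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
                                       gc_control->should_migrate_blocks);

        if (gc_type == FG_GC) {
                sbi->cur_victim_sec = NULL_SEGNO;
                if (has_enough_free_secs(sbi, sec_freed, 0))
                        goto stop;      /* or loop until nr_free_secs freed */
                /* not enough yet: count skipped rounds, maybe checkpoint */
        } else if (has_enough_free_secs(sbi, 0, 0)) {
                goto stop;
        }

        /* reclaim prefree segments while free space stays close to what
         * the writer paths need */
        __get_secs_required(sbi, NULL, &upper_secs, NULL);
        if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
            prefree_segments(sbi))
                ret = f2fs_write_checkpoint(sbi, &cpc);

        segno = NULL_SEGNO;
        goto gc_more;
stop:
        /* ... clear flags, trace, wake foreground waiters ... */
```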
1867 /* Write checkpoint to reclaim prefree segments */ 1868 if (free_sections(sbi) < NR_CURSEG_PERSIST_TYPE && | 1893 __get_secs_required(sbi, NULL, &upper_secs, NULL); 1894 1895 /* 1896 * Write checkpoint to reclaim prefree segments. 1897 * We need more three extra sections for writer's data/node/dentry. 1898 */ 1899 if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS && |
1869 prefree_segments(sbi)) { 1870 ret = f2fs_write_checkpoint(sbi, &cpc); 1871 if (ret) 1872 goto stop; 1873 } 1874go_gc_more: 1875 segno = NULL_SEGNO; 1876 goto gc_more; --- 50 unchanged lines hidden (view full) --- 1927 am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO; 1928 am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT; 1929 am->age_weight = DEF_GC_THREAD_AGE_WEIGHT; 1930 am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD; 1931} 1932 1933void f2fs_build_gc_manager(struct f2fs_sb_info *sbi) 1934{ | 1900 prefree_segments(sbi)) { 1901 ret = f2fs_write_checkpoint(sbi, &cpc); 1902 if (ret) 1903 goto stop; 1904 } 1905go_gc_more: 1906 segno = NULL_SEGNO; 1907 goto gc_more; --- 50 unchanged lines hidden (view full) --- 1958 am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO; 1959 am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT; 1960 am->age_weight = DEF_GC_THREAD_AGE_WEIGHT; 1961 am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD; 1962} 1963 1964void f2fs_build_gc_manager(struct f2fs_sb_info *sbi) 1965{ |
1935 DIRTY_I(sbi)->v_ops = &default_v_ops; 1936 | |
1937 sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES; 1938 1939 /* give warm/cold data area from slower device */ 1940 if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi)) 1941 SIT_I(sbi)->last_victim[ALLOC_NEXT] = 1942 GET_SEGNO(sbi, FDEV(0).end_blk) + 1; 1943 1944 init_atgc_management(sbi); --- 114 unchanged lines hidden (view full) --- 2059 if (f2fs_is_multi_device(sbi)) { 2060 int last_dev = sbi->s_ndevs - 1; 2061 2062 FDEV(last_dev).total_segments = 2063 (int)FDEV(last_dev).total_segments + segs; 2064 FDEV(last_dev).end_blk = 2065 (long long)FDEV(last_dev).end_blk + blks; 2066#ifdef CONFIG_BLK_DEV_ZONED | 1966 sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES; 1967 1968 /* give warm/cold data area from slower device */ 1969 if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi)) 1970 SIT_I(sbi)->last_victim[ALLOC_NEXT] = 1971 GET_SEGNO(sbi, FDEV(0).end_blk) + 1; 1972 1973 init_atgc_management(sbi); --- 114 unchanged lines hidden (view full) --- 2088 if (f2fs_is_multi_device(sbi)) { 2089 int last_dev = sbi->s_ndevs - 1; 2090 2091 FDEV(last_dev).total_segments = 2092 (int)FDEV(last_dev).total_segments + segs; 2093 FDEV(last_dev).end_blk = 2094 (long long)FDEV(last_dev).end_blk + blks; 2095#ifdef CONFIG_BLK_DEV_ZONED |
2067 FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz + 2068 (int)(blks >> sbi->log_blocks_per_blkz); | 2096 FDEV(last_dev).nr_blkz = FDEV(last_dev).nr_blkz + 2097 div_u64(blks, sbi->blocks_per_blkz); |
2069#endif 2070 } 2071} 2072 2073int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) 2074{ 2075 __u64 old_block_count, shrunk_blocks; 2076 struct cp_control cpc = { CP_RESIZE, 0, 0, 0 }; --- 117 unchanged lines hidden --- | 2098#endif 2099 } 2100} 2101 2102int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) 2103{ 2104 __u64 old_block_count, shrunk_blocks; 2105 struct cp_control cpc = { CP_RESIZE, 0, 0, 0 }; --- 117 unchanged lines hidden --- |
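Finally, when the resize path grows the last device, the zoned-device zone count is now updated with div_u64(blks, sbi->blocks_per_blkz) instead of casting through int and shifting by log_blocks_per_blkz, so the arithmetic no longer assumes a power-of-two zone size and the int casts around the 64-bit block count are gone. div_u64() is the kernel's u64-by-u32 division helper; a trivial illustration (the helper below is hypothetical, not part of f2fs):

```c
#include <linux/math64.h>

/* blks may exceed 32 bits on large devices; divide it as a u64. */
static inline u64 blocks_to_zones(u64 blks, u32 blocks_per_blkz)
{
        return div_u64(blks, blocks_per_blkz);
}
```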