--- segment.c (bab475c5414e8d1fa182fd17ae966864e9c85741)
+++ segment.c (4354994f097d068a894aa1a0860da54571df3582)
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
--- 162 unchanged lines hidden ---
        int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
        int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
        int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

        if (test_opt(sbi, LFS))
                return false;
        if (sbi->gc_mode == GC_URGENT)
                return true;
+        if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+                return true;
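The two lines added above make f2fs_need_SSR() return true unconditionally while SBI_CP_DISABLED is set, presumably so that allocation leans on reusing invalid blocks inside existing segments (SSR) rather than consuming fresh free sections while no checkpoint can reclaim prefree space. A minimal userspace model of the resulting decision; struct model_sb and the values in main() are invented stand-ins for the f2fs_sb_info fields the real function reads:

```c
#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for the handful of f2fs_sb_info fields consulted here. */
struct model_sb {
        bool lfs_mode;          /* test_opt(sbi, LFS)                    */
        bool gc_urgent;         /* sbi->gc_mode == GC_URGENT             */
        bool cp_disabled;       /* is_sbi_flag_set(sbi, SBI_CP_DISABLED) */
        int free_sections;
        int node_secs, dent_secs, imeta_secs;
        int min_ssr_sections, reserved_sections;
};

/* Mirrors the decision order of f2fs_need_SSR() after this patch. */
static bool model_need_ssr(const struct model_sb *s)
{
        if (s->lfs_mode)
                return false;
        if (s->gc_urgent)
                return true;
        if (s->cp_disabled)             /* the newly added early return */
                return true;
        return s->free_sections <= s->node_secs + 2 * s->dent_secs +
                s->imeta_secs + s->min_ssr_sections + s->reserved_sections;
}

int main(void)
{
        struct model_sb s = { .cp_disabled = true, .free_sections = 100 };

        printf("need SSR: %d\n", model_need_ssr(&s));   /* 1, even with free space */
        return 0;
}
```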

        return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
                        SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
}

void f2fs_register_inmem_page(struct inode *inode, struct page *page)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
--- 288 unchanged lines hidden ---
                f2fs_show_injection_info(FAULT_CHECKPOINT);
                f2fs_stop_checkpoint(sbi, false);
        }

        /* balance_fs_bg is able to be pending */
        if (need && excess_cached_nats(sbi))
                f2fs_balance_fs_bg(sbi);

+        if (f2fs_is_checkpoint_ready(sbi))
+                return;
+
        /*
         * We should do GC or end up with checkpoint, if there are so many dirty
         * dir/node pages without enough free segments.
         */
        if (has_not_enough_free_secs(sbi, 0, 0)) {
                mutex_lock(&sbi->gc_mutex);
                f2fs_gc(sbi, false, false, NULL_SEGNO);
        }
--- 300 unchanged lines hidden ---
/*
 * Should not occur error such as -ENOMEM.
 * Adding dirty entry into seglist is not critical operation.
 * If a given segment is one of current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-        unsigned short valid_blocks;
+        unsigned short valid_blocks, ckpt_valid_blocks;

        if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
                return;

        mutex_lock(&dirty_i->seglist_lock);

        valid_blocks = get_valid_blocks(sbi, segno, false);
+        ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno);

-        if (valid_blocks == 0) {
+        if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
+                                ckpt_valid_blocks == sbi->blocks_per_seg)) {
                __locate_dirty_segment(sbi, segno, PRE);
                __remove_dirty_segment(sbi, segno, DIRTY);
        } else if (valid_blocks < sbi->blocks_per_seg) {
                __locate_dirty_segment(sbi, segno, DIRTY);
        } else {
                /* Recovery routine with SSR needs this */
                __remove_dirty_segment(sbi, segno, DIRTY);
        }

        mutex_unlock(&dirty_i->seglist_lock);
}

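The classification that locate_dirty_segment() performs after this change can be written as a small pure function: a segment becomes prefree only when it has no valid blocks and, while SBI_CP_DISABLED is set, only when every one of its blocks was still valid at the last checkpoint; a partially valid segment stays dirty. This is a standalone model, not kernel code, and the names below are invented:

```c
#include <stdbool.h>
#include <stdio.h>

enum seg_state { SEG_PREFREE, SEG_DIRTY, SEG_LEAVE_ALONE };

/*
 * Model of the (modified) classification in locate_dirty_segment().
 * valid_blocks, ckpt_valid_blocks and blocks_per_seg mirror the values the
 * kernel reads for one segment.
 */
static enum seg_state classify(unsigned int valid_blocks,
                               unsigned int ckpt_valid_blocks,
                               unsigned int blocks_per_seg,
                               bool cp_disabled)
{
        if (valid_blocks == 0 &&
            (!cp_disabled || ckpt_valid_blocks == blocks_per_seg))
                return SEG_PREFREE;
        if (valid_blocks < blocks_per_seg)
                return SEG_DIRTY;
        return SEG_LEAVE_ALONE;        /* fully valid: dropped from the dirty list */
}

int main(void)
{
        /* empty now, but partly invalid already at checkpoint time:
         * with checkpoints disabled it stays DIRTY instead of PREFREE */
        printf("%d\n", classify(0, 100, 512, true));    /* SEG_DIRTY   */
        printf("%d\n", classify(0, 100, 512, false));   /* SEG_PREFREE */
        printf("%d\n", classify(0, 512, 512, true));    /* SEG_PREFREE */
        return 0;
}
```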
+/* This moves currently empty dirty blocks to prefree. Must hold seglist_lock */
+void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
+{
+        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+        unsigned int segno;
+
+        mutex_lock(&dirty_i->seglist_lock);
+        for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
+                if (get_valid_blocks(sbi, segno, false))
+                        continue;
+                if (IS_CURSEG(sbi, segno))
+                        continue;
+                __locate_dirty_segment(sbi, segno, PRE);
+                __remove_dirty_segment(sbi, segno, DIRTY);
+        }
+        mutex_unlock(&dirty_i->seglist_lock);
+}
+
+int f2fs_disable_cp_again(struct f2fs_sb_info *sbi)
+{
+        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+        block_t ovp = overprovision_segments(sbi) << sbi->log_blocks_per_seg;
+        block_t holes[2] = {0, 0};        /* DATA and NODE */
+        struct seg_entry *se;
+        unsigned int segno;
+
+        mutex_lock(&dirty_i->seglist_lock);
+        for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
+                se = get_seg_entry(sbi, segno);
+                if (IS_NODESEG(se->type))
+                        holes[NODE] += sbi->blocks_per_seg - se->valid_blocks;
+                else
+                        holes[DATA] += sbi->blocks_per_seg - se->valid_blocks;
+        }
+        mutex_unlock(&dirty_i->seglist_lock);
+
+        if (holes[DATA] > ovp || holes[NODE] > ovp)
+                return -EAGAIN;
+        return 0;
+}
+
+/* This is only used by SBI_CP_DISABLED */
+static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
+{
+        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+        unsigned int segno = 0;
+
+        mutex_lock(&dirty_i->seglist_lock);
+        for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
+                if (get_valid_blocks(sbi, segno, false))
+                        continue;
+                if (get_ckpt_valid_blocks(sbi, segno))
+                        continue;
+                mutex_unlock(&dirty_i->seglist_lock);
+                return segno;
+        }
+        mutex_unlock(&dirty_i->seglist_lock);
+        return NULL_SEGNO;
+}
+
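The new f2fs_disable_cp_again() boils down to simple arithmetic: the invalid ("hole") blocks scattered across dirty segments, summed separately for node and data segments, must each fit within the overprovision area, otherwise the function refuses with -EAGAIN. A standalone model of that accounting; struct toy_seg and all numbers in main() are invented for illustration:

```c
#include <errno.h>
#include <stdio.h>

/* Toy segment descriptor; the kernel reads this from struct seg_entry. */
struct toy_seg {
        int is_node;                 /* IS_NODESEG(se->type) */
        unsigned int valid_blocks;
};

/*
 * Model of the hole accounting in f2fs_disable_cp_again(): every block that
 * is currently invalid in a dirty segment is a "hole", and the holes of each
 * type must fit inside the overprovision blocks.
 */
static int model_disable_cp_again(const struct toy_seg *dirty, int nr_dirty,
                                  unsigned int blocks_per_seg,
                                  unsigned int ovp_blocks)
{
        unsigned long long holes_data = 0, holes_node = 0;
        int i;

        for (i = 0; i < nr_dirty; i++) {
                unsigned int hole = blocks_per_seg - dirty[i].valid_blocks;

                if (dirty[i].is_node)
                        holes_node += hole;
                else
                        holes_data += hole;
        }

        if (holes_data > ovp_blocks || holes_node > ovp_blocks)
                return -EAGAIN;
        return 0;
}

int main(void)
{
        /* 512 blocks per segment; overprovision of 1536 vs. 256 blocks. */
        struct toy_seg dirty[] = {
                { .is_node = 0, .valid_blocks = 100 },  /* 412 data holes */
                { .is_node = 0, .valid_blocks = 500 },  /*  12 data holes */
                { .is_node = 1, .valid_blocks = 510 },  /*   2 node holes */
        };

        printf("%d\n", model_disable_cp_again(dirty, 3, 512, 1536)); /* 0: ok */
        printf("%d\n", model_disable_cp_again(dirty, 3, 512, 256));  /* -EAGAIN */
        return 0;
}
```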
static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
                struct block_device *bdev, block_t lstart,
                block_t start, block_t len)
{
        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
        struct list_head *pend_list;
        struct discard_cmd *dc;

--- 1194 unchanged lines hidden ---
                        se->valid_blocks--;
                        del = 0;
                }

                if (!f2fs_test_and_set_bit(offset, se->discard_map))
                        sbi->discard_blks--;

                /* don't overwrite by SSR to keep node chain */
-                if (IS_NODESEG(se->type)) {
+                if (IS_NODESEG(se->type) &&
+                                !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
                        if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
                                se->ckpt_valid_blocks++;
                }
        } else {
                exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
                mir_exist = f2fs_test_and_clear_bit(offset,
                                                se->cur_valid_map_mir);
--- 5 unchanged lines hidden ---
                }
#endif
                if (unlikely(!exist)) {
                        f2fs_msg(sbi->sb, KERN_ERR,
                                "Bitmap was wrongly cleared, blk:%u", blkaddr);
                        f2fs_bug_on(sbi, 1);
                        se->valid_blocks++;
                        del = 0;
+                } else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+                        /*
+                         * If checkpoints are off, we must not reuse data that
+                         * was used in the previous checkpoint. If it was used
+                         * before, we must track that to know how much space we
+                         * really have.
+                         */
+                        if (f2fs_test_bit(offset, se->ckpt_valid_map))
+                                sbi->unusable_block_count++;
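The branch added above implements the accounting its comment describes: while checkpoints are off, invalidating a block that the last checkpoint still considers valid does not return usable space, so it is counted in sbi->unusable_block_count. A standalone sketch of that rule; toy_seg, invalidate_block and the two small arrays are invented stand-ins for struct seg_entry and its cur_valid_map/ckpt_valid_map bitmaps:

```c
#include <stdbool.h>
#include <stdio.h>

#define BLOCKS 8

/* Toy per-segment state; the kernel keeps this in struct seg_entry. */
struct toy_seg {
        bool cur_valid[BLOCKS];    /* valid right now                 */
        bool ckpt_valid[BLOCKS];   /* valid as of the last checkpoint */
};

static unsigned long long unusable_block_count;

/*
 * Model of the new branch in update_sit_entry(): invalidating a block while
 * checkpoints are off does not make it reusable if the last checkpoint still
 * references it, so account it as unusable instead.
 */
static void invalidate_block(struct toy_seg *se, int off, bool cp_disabled)
{
        if (!se->cur_valid[off])
                return;                         /* already invalid */
        se->cur_valid[off] = false;
        if (cp_disabled && se->ckpt_valid[off])
                unusable_block_count++;
}

int main(void)
{
        struct toy_seg se = {
                .cur_valid  = { true, true, true, false },
                .ckpt_valid = { true, true, false, false },
        };

        invalidate_block(&se, 0, true);  /* was in checkpoint -> unusable       */
        invalidate_block(&se, 2, true);  /* written after checkpoint -> reusable */
        printf("unusable blocks: %llu\n", unusable_block_count);  /* 1 */
        return 0;
}
```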
                }

                if (f2fs_test_and_clear_bit(offset, se->discard_map))
                        sbi->discard_blks++;
        }
        if (!f2fs_test_bit(offset, se->ckpt_valid_map))
                se->ckpt_valid_blocks += del;

--- 266 unchanged lines hidden ---
}

static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
{
        /* if segs_per_sec is large than 1, we need to keep original policy. */
        if (sbi->segs_per_sec != 1)
                return CURSEG_I(sbi, type)->segno;

+        if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+                return 0;
+
        if (test_opt(sbi, NOHEAP) &&
                        (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
                return 0;

        if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
                return SIT_I(sbi)->last_victim[ALLOC_NEXT];

        /* find segments from 0 to reuse freed segments */
--- 128 unchanged lines hidden ---
        for (; cnt-- > 0; reversed ? i-- : i++) {
                if (i == type)
                        continue;
                if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) {
                        curseg->next_segno = segno;
                        return 1;
                }
        }
+
+        /* find valid_blocks=0 in dirty list */
+        if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+                segno = get_free_segment(sbi);
+                if (segno != NULL_SEGNO) {
+                        curseg->next_segno = segno;
+                        return 1;
+                }
+        }
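The fallback added above to get_ssr_segment() only fires while SBI_CP_DISABLED is set: when no ordinary SSR victim is found, get_free_segment() (added earlier in this diff) hands out a dirty segment that is empty now and was already empty at the last checkpoint. A standalone model of that selection; toy_seg and the sample values are invented:

```c
#include <stdio.h>

#define TOY_NULL_SEGNO ((unsigned int)-1)

/* Toy per-segment counters; the kernel derives these from the SIT. */
struct toy_seg {
        unsigned int valid_blocks;        /* valid right now                 */
        unsigned int ckpt_valid_blocks;   /* valid as of the last checkpoint */
};

/*
 * Model of get_free_segment(): while checkpoints are off, only a dirty
 * segment that is empty now *and* was empty at the last checkpoint can be
 * handed out as a fresh allocation target, since nothing in it would be
 * needed for a rollback to that checkpoint.
 */
static unsigned int model_get_free_segment(const struct toy_seg *segs,
                                           unsigned int nr)
{
        unsigned int segno;

        for (segno = 0; segno < nr; segno++) {
                if (segs[segno].valid_blocks)
                        continue;
                if (segs[segno].ckpt_valid_blocks)
                        continue;
                return segno;
        }
        return TOY_NULL_SEGNO;
}

int main(void)
{
        struct toy_seg segs[] = {
                { .valid_blocks = 12, .ckpt_valid_blocks = 30 },
                { .valid_blocks = 0,  .ckpt_valid_blocks = 7  },  /* still pinned by ckpt */
                { .valid_blocks = 0,  .ckpt_valid_blocks = 0  },  /* truly free */
        };

        printf("segno = %u\n", model_get_free_segment(segs, 3));  /* 2 */
        return 0;
}
```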
        return 0;
}

/*
 * flush out current segment and replace it with new segment
 * This function should be returned with success, otherwise BUG
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
                                                int type, bool force)
{
        struct curseg_info *curseg = CURSEG_I(sbi, type);

        if (force)
                new_curseg(sbi, type, true);
        else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
                                        type == CURSEG_WARM_NODE)
                new_curseg(sbi, type, false);
-        else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
+        else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type) &&
+                        likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
                new_curseg(sbi, type, false);
        else if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type))
                change_curseg(sbi, type);
        else
                new_curseg(sbi, type, false);

        stat_inc_seg_type(sbi, curseg);
}
--- 1824 unchanged lines hidden ---
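Taken together, the changes to allocate_segment_by_default(), f2fs_need_SSR() and get_ssr_segment() reshape the allocation policy while SBI_CP_DISABLED is set: the "use the physically next free segment" LFS shortcut is skipped, SSR is always attempted, and a completely free dirty segment is used before opening a brand-new one. A compact model of the resulting decision order; every parameter is an invented boolean standing in for the kernel-side predicate named in its comment:

```c
#include <stdbool.h>
#include <stdio.h>

/* Which branch of allocate_segment_by_default() a request ends up taking. */
enum alloc_path {
        PATH_FORCE_NEW,        /* force == true                       */
        PATH_WARM_NODE_NEW,    /* warm node without CP_CRC_RECOVERY   */
        PATH_NEXT_FREE,        /* LFS: grab the next free segment     */
        PATH_SSR,              /* reuse an existing segment           */
        PATH_NEW,              /* fall back to a brand-new segment    */
};

/*
 * Model of the modified decision chain. need_ssr stands for the pre-existing
 * reasons f2fs_need_SSR() may return true; cp_disabled is the new
 * SBI_CP_DISABLED state, which disables the next-free shortcut and forces
 * SSR to be tried. ssr_or_free_seg_found lumps together an SSR victim and
 * the get_free_segment() fallback.
 */
static enum alloc_path model_allocate(bool force, bool warm_node_no_crc,
                                      bool lfs_alloc, bool next_seg_free,
                                      bool cp_disabled, bool need_ssr,
                                      bool ssr_or_free_seg_found)
{
        if (force)
                return PATH_FORCE_NEW;
        if (warm_node_no_crc)
                return PATH_WARM_NODE_NEW;
        if (lfs_alloc && next_seg_free && !cp_disabled)
                return PATH_NEXT_FREE;
        if ((need_ssr || cp_disabled) && ssr_or_free_seg_found)
                return PATH_SSR;
        return PATH_NEW;
}

int main(void)
{
        /* with SBI_CP_DISABLED set: the next-free shortcut is bypassed, SSR wins */
        printf("%d\n", model_allocate(false, false, true, true,
                                      true, false, true));   /* PATH_SSR       */
        /* normal mode: the same request takes the next free segment */
        printf("%d\n", model_allocate(false, false, true, true,
                                      false, false, true));  /* PATH_NEXT_FREE */
        return 0;
}
```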