--- gc.c (31d1b7710262fba12282b24083f20dc76e0efc93)
+++ gc.c (5222595d093ebe80329d38d255d14316257afb3e)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * fs/f2fs/gc.c
  *
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  * http://www.samsung.com/
  */
 #include <linux/fs.h>
--- 128 unchanged lines hidden ---
 	gc_th->gc_wake= 0;
 
 	sbi->gc_thread = gc_th;
 	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
 	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
 			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
 	if (IS_ERR(gc_th->f2fs_gc_task)) {
 		err = PTR_ERR(gc_th->f2fs_gc_task);
-		kfree(gc_th);
+		kvfree(gc_th);
 		sbi->gc_thread = NULL;
 	}
 out:
 	return err;
 }
 
 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
 	if (!gc_th)
 		return;
 	kthread_stop(gc_th->f2fs_gc_task);
-	kfree(gc_th);
+	kvfree(gc_th);
 	sbi->gc_thread = NULL;
 }
 
 static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
 {
 	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
 
 	switch (sbi->gc_mode) {
--- 151 unchanged lines hidden ---
 
 	p.alloc_mode = alloc_mode;
 	select_policy(sbi, gc_type, type, &p);
 
 	p.min_segno = NULL_SEGNO;
 	p.min_cost = get_max_cost(sbi, &p);
 
 	if (*result != NULL_SEGNO) {
-		if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
-			get_valid_blocks(sbi, *result, false) &&
+		if (get_valid_blocks(sbi, *result, false) &&
 			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
 			p.min_segno = *result;
 		goto out;
 	}
 
 	if (p.max_search == 0)
 		goto out;
 
+	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
+		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
+			p.min_segno = sbi->next_victim_seg[BG_GC];
+			*result = p.min_segno;
+			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
+			goto got_result;
+		}
+		if (gc_type == FG_GC &&
+				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
+			p.min_segno = sbi->next_victim_seg[FG_GC];
+			*result = p.min_segno;
+			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
+			goto got_result;
+		}
+	}
+
 	last_victim = sm->last_victim[p.gc_mode];
 	if (p.alloc_mode == LFS && gc_type == FG_GC) {
 		p.min_segno = check_bg_victims(sbi);
 		if (p.min_segno != NULL_SEGNO)
 			goto got_it;
 	}
 
 	while (1) {
--- 46 unchanged lines hidden ---
 			else
 				sm->last_victim[p.gc_mode] = segno + 1;
 			sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
 			break;
 		}
 	}
 	if (p.min_segno != NULL_SEGNO) {
 got_it:
+		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
+got_result:
 		if (p.alloc_mode == LFS) {
 			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
 			if (gc_type == FG_GC)
 				sbi->cur_victim_sec = secno;
 			else
 				set_bit(secno, dirty_i->victim_secmap);
 		}
-		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
 
+	}
+out:
+	if (p.min_segno != NULL_SEGNO)
 		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
 				sbi->cur_victim_sec,
 				prefree_segments(sbi), free_segments(sbi));
-	}
-out:
 	mutex_unlock(&dirty_i->seglist_lock);
 
 	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
 }
 
 static const struct victim_selection default_v_ops = {
 	.get_victim = get_victim_by_default,
 };
--- 233 unchanged lines hidden ---
 		err = -EFAULT;
 		goto put_page;
 	}
 got_it:
 	/* read page */
 	fio.page = page;
 	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
 
+	/*
+	 * don't cache encrypted data into meta inode until previous dirty
+	 * data were writebacked to avoid racing between GC and flush.
+	 */
+	f2fs_wait_on_page_writeback(page, DATA, true);
+
+	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
+
 	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
 					dn.data_blkaddr,
 					FGP_LOCK | FGP_CREAT, GFP_NOFS);
 	if (!fio.encrypted_page) {
 		err = -ENOMEM;
 		goto put_page;
 	}
 
--- 71 unchanged lines hidden ---
 	}
 
 	/*
 	 * don't cache encrypted data into meta inode until previous dirty
 	 * data were writebacked to avoid racing between GC and flush.
 	 */
 	f2fs_wait_on_page_writeback(page, DATA, true);
 
+	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
+
 	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
 	if (err)
 		goto put_out;
 
 	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
 
 	/* read page */
 	fio.page = page;
--- 41 unchanged lines hidden ---
 		goto put_page_out;
 	}
 	if (unlikely(!PageUptodate(fio.encrypted_page))) {
 		err = -EIO;
 		goto put_page_out;
 	}
 
 write_page:
-	set_page_dirty(fio.encrypted_page);
 	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
+	set_page_dirty(fio.encrypted_page);
 	if (clear_page_dirty_for_io(fio.encrypted_page))
 		dec_page_count(fio.sbi, F2FS_DIRTY_META);
 
 	set_page_writeback(fio.encrypted_page);
 	ClearPageError(page);
 
 	/* allocate block address */
 	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
--- 77 unchanged lines hidden ---
 		.page = page,
 		.encrypted_page = NULL,
 		.need_lock = LOCK_REQ,
 		.io_type = FS_GC_DATA_IO,
 	};
 	bool is_dirty = PageDirty(page);
 
 retry:
-	set_page_dirty(page);
 	f2fs_wait_on_page_writeback(page, DATA, true);
+
+	set_page_dirty(page);
 	if (clear_page_dirty_for_io(page)) {
 		inode_dec_dirty_pages(inode);
 		f2fs_remove_dirty_inode(inode);
 	}
 
 	set_cold_data(page);
 
 	err = f2fs_do_write_data_page(&fio);
--- 178 unchanged lines hidden ---
 				unsigned int start_segno,
 				struct gc_inode_list *gc_list, int gc_type)
 {
 	struct page *sum_page;
 	struct f2fs_summary_block *sum;
 	struct blk_plug plug;
 	unsigned int segno = start_segno;
 	unsigned int end_segno = start_segno + sbi->segs_per_sec;
-	int seg_freed = 0;
+	int seg_freed = 0, migrated = 0;
 	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
 						SUM_TYPE_DATA : SUM_TYPE_NODE;
 	int submitted = 0;
 
+	if (__is_large_section(sbi))
+		end_segno = rounddown(end_segno, sbi->segs_per_sec);
+
 	/* readahead multi ssa blocks those have contiguous address */
-	if (sbi->segs_per_sec > 1)
+	if (__is_large_section(sbi))
 		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
-					sbi->segs_per_sec, META_SSA, true);
+					end_segno - segno, META_SSA, true);
 
 	/* reference all summary page */
 	while (segno < end_segno) {
 		sum_page = f2fs_get_sum_page(sbi, segno++);
 		if (IS_ERR(sum_page)) {
 			int err = PTR_ERR(sum_page);
 
 			end_segno = segno - 1;
--- 12 unchanged lines hidden ---
 
 	for (segno = start_segno; segno < end_segno; segno++) {
 
 		/* find segment summary of victim */
 		sum_page = find_get_page(META_MAPPING(sbi),
 					GET_SUM_BLOCK(sbi, segno));
 		f2fs_put_page(sum_page, 0);
 
-		if (get_valid_blocks(sbi, segno, false) == 0 ||
-				!PageUptodate(sum_page) ||
-				unlikely(f2fs_cp_error(sbi)))
-			goto next;
+		if (get_valid_blocks(sbi, segno, false) == 0)
+			goto freed;
+		if (__is_large_section(sbi) &&
+				migrated >= sbi->migration_granularity)
+			goto skip;
+		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
+			goto skip;
 
 		sum = page_address(sum_page);
 		if (type != GET_SUM_TYPE((&sum->footer))) {
 			f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
 				"type [%d, %d] in SSA and SIT",
 				segno, type, GET_SUM_TYPE((&sum->footer)));
 			set_sbi_flag(sbi, SBI_NEED_FSCK);
-			goto next;
+			goto skip;
 		}
 
 		/*
 		 * this is to avoid deadlock:
 		 * - lock_page(sum_page)         - f2fs_replace_block
 		 *  - check_valid_map()            - down_write(sentry_lock)
 		 *   - down_read(sentry_lock)     - change_curseg()
 		 *                                  - lock_page(sum_page)
 		 */
 		if (type == SUM_TYPE_NODE)
 			submitted += gc_node_segment(sbi, sum->entries, segno,
 								gc_type);
 		else
 			submitted += gc_data_segment(sbi, sum->entries, gc_list,
 							segno, gc_type);
 
 		stat_inc_seg_count(sbi, type, gc_type);
 
+freed:
 		if (gc_type == FG_GC &&
 				get_valid_blocks(sbi, segno, false) == 0)
 			seg_freed++;
-next:
+		migrated++;
+
+		if (__is_large_section(sbi) && segno + 1 < end_segno)
+			sbi->next_victim_seg[gc_type] = segno + 1;
+skip:
 		f2fs_put_page(sum_page, 0);
 	}
 
 	if (submitted)
 		f2fs_submit_merged_write(sbi,
 				(type == SUM_TYPE_NODE) ? NODE : DATA);
 
 	blk_finish_plug(&plug);
--- 127 unchanged lines hidden ---
 
 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
 {
 	DIRTY_I(sbi)->v_ops = &default_v_ops;
 
 	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
 
 	/* give warm/cold data area from slower device */
-	if (sbi->s_ndevs && sbi->segs_per_sec == 1)
+	if (sbi->s_ndevs && !__is_large_section(sbi))
 		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
 				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
 }
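
Note: the new side of this diff repeatedly swaps open-coded sbi->segs_per_sec comparisons for __is_large_section() (see the hunks around f2fs_ra_meta_pages() and f2fs_build_gc_manager()). The helper is defined in fs/f2fs/f2fs.h, not in gc.c, so it never appears here; the one-line sketch below is inferred from those call sites and is not part of the change itself.

/* sketch only -- inferred from the call sites above; the real definition lives in fs/f2fs/f2fs.h */
#define __is_large_section(sbi)	((sbi)->segs_per_sec > 1)

Read that way, the rounddown() of end_segno, the migrated counter capped by sbi->migration_granularity, and the next_victim_seg[] hand-off in do_garbage_collect() only come into play when a section spans more than one segment; single-segment layouts keep the previous per-segment behaviour.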