Lines matching full:sbi in fs/f2fs/segment.c (Linux kernel f2fs segment manager). Each entry gives the source line number, the matched line, and the enclosing function; "argument" and "local" mark lines where sbi is declared rather than merely used.
171 bool f2fs_need_SSR(struct f2fs_sb_info *sbi) in f2fs_need_SSR() argument
173 int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES); in f2fs_need_SSR()
174 int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS); in f2fs_need_SSR()
175 int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA); in f2fs_need_SSR()
177 if (f2fs_lfs_mode(sbi)) in f2fs_need_SSR()
179 if (sbi->gc_mode == GC_URGENT_HIGH) in f2fs_need_SSR()
181 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) in f2fs_need_SSR()
184 return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs + in f2fs_need_SSR()
185 SM_I(sbi)->min_ssr_sections + reserved_sections(sbi)); in f2fs_need_SSR()
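
    The matched lines 184-185 spell out the SSR trigger: slack-space reuse is
    needed once free sections cannot cover the sections demanded by dirty
    nodes, dentries (weighted twice), and inode metadata, plus the SSR floor
    and the reserve. A minimal userspace C sketch of that comparison; all
    parameter names are illustrative, not the kernel API:

        #include <stdbool.h>

        /* Model of the f2fs_need_SSR() check on lines 184-185. */
        static bool need_ssr(unsigned int free_secs, unsigned int node_secs,
                             unsigned int dent_secs, unsigned int imeta_secs,
                             unsigned int min_ssr_secs, unsigned int reserved_secs)
        {
                return free_secs <= node_secs + 2 * dent_secs + imeta_secs +
                                    min_ssr_secs + reserved_secs;
        }
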
221 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in __replace_atomic_write_block() local
237 err = f2fs_get_node_info(sbi, dn.nid, &ni, false); in __replace_atomic_write_block()
247 dec_valid_block_count(sbi, inode, 1); in __replace_atomic_write_block()
248 f2fs_invalidate_blocks(sbi, dn.data_blkaddr); in __replace_atomic_write_block()
251 f2fs_replace_block(sbi, &dn, dn.data_blkaddr, in __replace_atomic_write_block()
257 err = inc_valid_block_count(sbi, inode, &count, true); in __replace_atomic_write_block()
265 dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count); in __replace_atomic_write_block()
267 f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr, in __replace_atomic_write_block()
304 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in __f2fs_commit_atomic_write() local
339 } else if (!f2fs_is_valid_blkaddr(sbi, blkaddr, in __f2fs_commit_atomic_write()
343 f2fs_handle_error(sbi, in __f2fs_commit_atomic_write()
371 sbi->revoked_atomic_block += fi->atomic_write_cnt; in __f2fs_commit_atomic_write()
373 sbi->committed_atomic_block += fi->atomic_write_cnt; in __f2fs_commit_atomic_write()
388 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_commit_atomic_write() local
397 f2fs_lock_op(sbi); in f2fs_commit_atomic_write()
401 f2fs_unlock_op(sbi); in f2fs_commit_atomic_write()
411 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need) in f2fs_balance_fs() argument
413 if (time_to_inject(sbi, FAULT_CHECKPOINT)) in f2fs_balance_fs()
414 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT); in f2fs_balance_fs()
417 if (need && excess_cached_nats(sbi)) in f2fs_balance_fs()
418 f2fs_balance_fs_bg(sbi, false); in f2fs_balance_fs()
420 if (!f2fs_is_checkpoint_ready(sbi)) in f2fs_balance_fs()
427 if (has_enough_free_secs(sbi, 0, 0)) in f2fs_balance_fs()
430 if (test_opt(sbi, GC_MERGE) && sbi->gc_thread && in f2fs_balance_fs()
431 sbi->gc_thread->f2fs_gc_task) { in f2fs_balance_fs()
434 prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait, in f2fs_balance_fs()
436 wake_up(&sbi->gc_thread->gc_wait_queue_head); in f2fs_balance_fs()
438 finish_wait(&sbi->gc_thread->fggc_wq, &wait); in f2fs_balance_fs()
447 f2fs_down_write(&sbi->gc_lock); in f2fs_balance_fs()
448 stat_inc_gc_call_count(sbi, FOREGROUND); in f2fs_balance_fs()
449 f2fs_gc(sbi, &gc_control); in f2fs_balance_fs()
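
    Lines 427-449 sketch the foreground balance policy: return early when
    enough free sections exist; with gc_merge and a live GC thread, hand
    foreground GC to that thread and sleep on its fggc_wq; otherwise take
    gc_lock and run f2fs_gc() directly. A simplified decision model (the
    enum names are illustrative):

        #include <stdbool.h>

        enum balance_action { BAL_NONE, BAL_WAKE_GC_THREAD, BAL_RUN_FG_GC };

        /* Decision model for lines 427-449 of f2fs_balance_fs(). */
        static enum balance_action balance_decision(bool enough_free_secs,
                                                    bool gc_merge,
                                                    bool gc_thread_running)
        {
                if (enough_free_secs)
                        return BAL_NONE;
                if (gc_merge && gc_thread_running)
                        return BAL_WAKE_GC_THREAD; /* sleep on fggc_wq until done */
                return BAL_RUN_FG_GC;              /* take gc_lock, call f2fs_gc() */
        }
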
453 static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi) in excess_dirty_threshold() argument
455 int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2; in excess_dirty_threshold()
456 unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS); in excess_dirty_threshold()
457 unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA); in excess_dirty_threshold()
458 unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES); in excess_dirty_threshold()
459 unsigned int meta = get_pages(sbi, F2FS_DIRTY_META); in excess_dirty_threshold()
460 unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA); in excess_dirty_threshold()
462 sbi->log_blocks_per_seg; in excess_dirty_threshold()
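
    Line 455 picks factor 3 while cp_rwsem is held and 2 otherwise, and the
    threshold (whose tail is the matched line 462) scales that factor by the
    per-type page budget shifted by log_blocks_per_seg. A sketch of the
    shape of the check; DIRTY_PAGE_BUDGET is a stand-in constant, not the
    kernel's name:

        #include <stdbool.h>

        #define DIRTY_PAGE_BUDGET 64    /* stand-in for the kernel's budget */

        /* Shape of excess_dirty_threshold(): each dirty-page counter is
         * compared against (factor * budget) << log_blocks_per_seg. */
        static bool excess_dirty(bool cp_locked, unsigned int log_blocks_per_seg,
                                 const unsigned int counters[5])
        {
                unsigned int factor = cp_locked ? 3 : 2;
                unsigned int threshold =
                        (factor * DIRTY_PAGE_BUDGET) << log_blocks_per_seg;
                int i;

                for (i = 0; i < 5; i++) /* dents, qdata, nodes, meta, imeta */
                        if (counters[i] >= threshold)
                                return true;
                return false;
        }
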
472 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg) in f2fs_balance_fs_bg() argument
474 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in f2fs_balance_fs_bg()
478 if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE)) in f2fs_balance_fs_bg()
479 f2fs_shrink_read_extent_tree(sbi, in f2fs_balance_fs_bg()
483 if (!f2fs_available_free_memory(sbi, AGE_EXTENT_CACHE)) in f2fs_balance_fs_bg()
484 f2fs_shrink_age_extent_tree(sbi, in f2fs_balance_fs_bg()
488 if (!f2fs_available_free_memory(sbi, NAT_ENTRIES)) in f2fs_balance_fs_bg()
489 f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK); in f2fs_balance_fs_bg()
491 if (!f2fs_available_free_memory(sbi, FREE_NIDS)) in f2fs_balance_fs_bg()
492 f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS); in f2fs_balance_fs_bg()
494 f2fs_build_free_nids(sbi, false, false); in f2fs_balance_fs_bg()
496 if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) || in f2fs_balance_fs_bg()
497 excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi)) in f2fs_balance_fs_bg()
501 if (is_inflight_io(sbi, REQ_TIME) || in f2fs_balance_fs_bg()
502 (!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem))) in f2fs_balance_fs_bg()
506 if (f2fs_time_over(sbi, CP_TIME)) in f2fs_balance_fs_bg()
510 if (f2fs_available_free_memory(sbi, NAT_ENTRIES) && in f2fs_balance_fs_bg()
511 f2fs_available_free_memory(sbi, INO_ENTRIES)) in f2fs_balance_fs_bg()
515 if (test_opt(sbi, DATA_FLUSH) && from_bg) { in f2fs_balance_fs_bg()
518 mutex_lock(&sbi->flush_lock); in f2fs_balance_fs_bg()
521 f2fs_sync_dirty_inodes(sbi, FILE_INODE, false); in f2fs_balance_fs_bg()
524 mutex_unlock(&sbi->flush_lock); in f2fs_balance_fs_bg()
526 stat_inc_cp_call_count(sbi, BACKGROUND); in f2fs_balance_fs_bg()
527 f2fs_sync_fs(sbi->sb, 1); in f2fs_balance_fs_bg()
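
    The background variant works through a fixed ladder: shrink the extent,
    NAT, and free-nid caches when memory is tight (lines 478-494), then
    decide whether a checkpoint is due (lines 496-527). A condensed model of
    that checkpoint decision, with each kernel predicate passed in as a
    flag:

        #include <stdbool.h>

        /* Condensed checkpoint decision from lines 496-511: dirty-metadata
         * pressure forces a sync; otherwise skip while I/O is in flight or
         * the request window has not elapsed under the checkpoint lock;
         * past CP_TIME always sync; finally, sync only if the NAT/ino
         * caches need a checkpoint to shrink. */
        static bool should_checkpoint(bool dirty_pressure, bool io_inflight,
                                      bool req_time_over, bool cp_locked,
                                      bool cp_time_over, bool caches_have_room)
        {
                if (dirty_pressure)
                        return true;
                if (io_inflight || (!req_time_over && cp_locked))
                        return false;
                if (cp_time_over)
                        return true;
                return !caches_have_room;
        }
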
530 static int __submit_flush_wait(struct f2fs_sb_info *sbi, in __submit_flush_wait() argument
535 trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER), in __submit_flush_wait()
536 test_opt(sbi, FLUSH_MERGE), ret); in __submit_flush_wait()
538 f2fs_update_iostat(sbi, NULL, FS_FLUSH_IO, 0); in __submit_flush_wait()
542 static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino) in submit_flush_wait() argument
547 if (!f2fs_is_multi_device(sbi)) in submit_flush_wait()
548 return __submit_flush_wait(sbi, sbi->sb->s_bdev); in submit_flush_wait()
550 for (i = 0; i < sbi->s_ndevs; i++) { in submit_flush_wait()
551 if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO)) in submit_flush_wait()
553 ret = __submit_flush_wait(sbi, FDEV(i).bdev); in submit_flush_wait()
562 struct f2fs_sb_info *sbi = data; in issue_flush_thread() local
563 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; in issue_flush_thread()
578 ret = submit_flush_wait(sbi, cmd->ino); in issue_flush_thread()
594 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino) in f2fs_issue_flush() argument
596 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; in f2fs_issue_flush()
600 if (test_opt(sbi, NOBARRIER)) in f2fs_issue_flush()
603 if (!test_opt(sbi, FLUSH_MERGE)) { in f2fs_issue_flush()
605 ret = submit_flush_wait(sbi, ino); in f2fs_issue_flush()
612 f2fs_is_multi_device(sbi)) { in f2fs_issue_flush()
613 ret = submit_flush_wait(sbi, ino); in f2fs_issue_flush()
648 ret = submit_flush_wait(sbi, ino); in f2fs_issue_flush()
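
    Lines 600-648 show the flush-merge dispatch: "nobarrier" mounts skip the
    flush entirely; without flush_merge, as the only queued flush, or on a
    multi-device volume the flush is submitted directly; otherwise the
    request is queued for issue_flush_thread to merge. A simplified model
    ("queued" stands for the count after this request joins):

        #include <stdbool.h>

        enum flush_path { FLUSH_SKIP, FLUSH_DIRECT, FLUSH_QUEUE };

        /* Dispatch model for f2fs_issue_flush(), lines 600-648. */
        static enum flush_path flush_dispatch(bool nobarrier, bool flush_merge,
                                              bool multi_device, int queued)
        {
                if (nobarrier)
                        return FLUSH_SKIP;
                if (!flush_merge || queued == 1 || multi_device)
                        return FLUSH_DIRECT;
                return FLUSH_QUEUE;
        }
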
665 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi) in f2fs_create_flush_cmd_control() argument
667 dev_t dev = sbi->sb->s_bdev->bd_dev; in f2fs_create_flush_cmd_control()
670 if (SM_I(sbi)->fcc_info) { in f2fs_create_flush_cmd_control()
671 fcc = SM_I(sbi)->fcc_info; in f2fs_create_flush_cmd_control()
677 fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL); in f2fs_create_flush_cmd_control()
684 SM_I(sbi)->fcc_info = fcc; in f2fs_create_flush_cmd_control()
685 if (!test_opt(sbi, FLUSH_MERGE)) in f2fs_create_flush_cmd_control()
689 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi, in f2fs_create_flush_cmd_control()
701 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free) in f2fs_destroy_flush_cmd_control() argument
703 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; in f2fs_destroy_flush_cmd_control()
713 SM_I(sbi)->fcc_info = NULL; in f2fs_destroy_flush_cmd_control()
717 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi) in f2fs_flush_device_cache() argument
721 if (!f2fs_is_multi_device(sbi)) in f2fs_flush_device_cache()
724 if (test_opt(sbi, NOBARRIER)) in f2fs_flush_device_cache()
727 for (i = 1; i < sbi->s_ndevs; i++) { in f2fs_flush_device_cache()
730 if (!f2fs_test_bit(i, (char *)&sbi->dirty_device)) in f2fs_flush_device_cache()
734 ret = __submit_flush_wait(sbi, FDEV(i).bdev); in f2fs_flush_device_cache()
740 f2fs_stop_checkpoint(sbi, false, in f2fs_flush_device_cache()
745 spin_lock(&sbi->dev_lock); in f2fs_flush_device_cache()
746 f2fs_clear_bit(i, (char *)&sbi->dirty_device); in f2fs_flush_device_cache()
747 spin_unlock(&sbi->dev_lock); in f2fs_flush_device_cache()
753 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, in __locate_dirty_segment() argument
756 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in __locate_dirty_segment()
759 if (IS_CURSEG(sbi, segno)) in __locate_dirty_segment()
766 struct seg_entry *sentry = get_seg_entry(sbi, segno); in __locate_dirty_segment()
770 f2fs_bug_on(sbi, 1); in __locate_dirty_segment()
776 if (__is_large_section(sbi)) { in __locate_dirty_segment()
777 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); in __locate_dirty_segment()
779 get_valid_blocks(sbi, segno, true); in __locate_dirty_segment()
781 f2fs_bug_on(sbi, unlikely(!valid_blocks || in __locate_dirty_segment()
782 valid_blocks == CAP_BLKS_PER_SEC(sbi))); in __locate_dirty_segment()
784 if (!IS_CURSEC(sbi, secno)) in __locate_dirty_segment()
790 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, in __remove_dirty_segment() argument
793 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in __remove_dirty_segment()
800 struct seg_entry *sentry = get_seg_entry(sbi, segno); in __remove_dirty_segment()
806 valid_blocks = get_valid_blocks(sbi, segno, true); in __remove_dirty_segment()
808 clear_bit(GET_SEC_FROM_SEG(sbi, segno), in __remove_dirty_segment()
811 clear_bit(segno, SIT_I(sbi)->invalid_segmap); in __remove_dirty_segment()
814 if (__is_large_section(sbi)) { in __remove_dirty_segment()
815 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); in __remove_dirty_segment()
818 valid_blocks == CAP_BLKS_PER_SEC(sbi)) { in __remove_dirty_segment()
823 if (!IS_CURSEC(sbi, secno)) in __remove_dirty_segment()
834 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) in locate_dirty_segment() argument
836 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in locate_dirty_segment()
840 if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno)) in locate_dirty_segment()
843 usable_blocks = f2fs_usable_blks_in_seg(sbi, segno); in locate_dirty_segment()
846 valid_blocks = get_valid_blocks(sbi, segno, false); in locate_dirty_segment()
847 ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false); in locate_dirty_segment()
849 if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) || in locate_dirty_segment()
851 __locate_dirty_segment(sbi, segno, PRE); in locate_dirty_segment()
852 __remove_dirty_segment(sbi, segno, DIRTY); in locate_dirty_segment()
854 __locate_dirty_segment(sbi, segno, DIRTY); in locate_dirty_segment()
857 __remove_dirty_segment(sbi, segno, DIRTY); in locate_dirty_segment()
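
    Lines 843-857 classify a segment by its valid-block count: no valid
    blocks makes it PRE (prefree, reclaimable), a partial count makes it
    DIRTY, and a full count removes it from the dirty list. A minimal model
    of that classification (the extra checkpoint-disabled condition on line
    849 is omitted here):

        enum seg_state { SEG_PRE, SEG_DIRTY, SEG_CLEAN_FULL };

        /* Classification model for locate_dirty_segment(), lines 843-857. */
        static enum seg_state classify_segment(unsigned int valid_blocks,
                                               unsigned int usable_blocks)
        {
                if (valid_blocks == 0)
                        return SEG_PRE;
                if (valid_blocks < usable_blocks)
                        return SEG_DIRTY;
                return SEG_CLEAN_FULL;
        }
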
864 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi) in f2fs_dirty_to_prefree() argument
866 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in f2fs_dirty_to_prefree()
870 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) { in f2fs_dirty_to_prefree()
871 if (get_valid_blocks(sbi, segno, false)) in f2fs_dirty_to_prefree()
873 if (IS_CURSEG(sbi, segno)) in f2fs_dirty_to_prefree()
875 __locate_dirty_segment(sbi, segno, PRE); in f2fs_dirty_to_prefree()
876 __remove_dirty_segment(sbi, segno, DIRTY); in f2fs_dirty_to_prefree()
881 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi) in f2fs_get_unusable_blocks() argument
884 (overprovision_segments(sbi) - reserved_segments(sbi)); in f2fs_get_unusable_blocks()
885 block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg; in f2fs_get_unusable_blocks()
886 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in f2fs_get_unusable_blocks()
893 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) { in f2fs_get_unusable_blocks()
894 se = get_seg_entry(sbi, segno); in f2fs_get_unusable_blocks()
896 holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) - in f2fs_get_unusable_blocks()
899 holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) - in f2fs_get_unusable_blocks()
910 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable) in f2fs_disable_cp_again() argument
913 (overprovision_segments(sbi) - reserved_segments(sbi)); in f2fs_disable_cp_again()
914 if (unusable > F2FS_OPTION(sbi).unusable_cap) in f2fs_disable_cp_again()
916 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) && in f2fs_disable_cp_again()
917 dirty_segments(sbi) > ovp_hole_segs) in f2fs_disable_cp_again()
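
    Lines 913-917 give the conditions for refusing to keep checkpointing
    disabled: unusable blocks past the configured cap, or (in quick-disable
    mode) dirty segments outgrowing the overprovision hole. A hedged
    userspace sketch of that gate:

        #include <errno.h>
        #include <stdbool.h>

        /* Model of f2fs_disable_cp_again(), lines 913-917. */
        static int can_stay_cp_disabled(unsigned long long unusable,
                                        unsigned long long unusable_cap,
                                        bool quick_mode, unsigned int dirty_segs,
                                        unsigned int ovp_hole_segs)
        {
                if (unusable > unusable_cap)
                        return -EAGAIN;
                if (quick_mode && dirty_segs > ovp_hole_segs)
                        return -EAGAIN;
                return 0;
        }
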
923 static unsigned int get_free_segment(struct f2fs_sb_info *sbi) in get_free_segment() argument
925 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in get_free_segment()
929 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) { in get_free_segment()
930 if (get_valid_blocks(sbi, segno, false)) in get_free_segment()
932 if (get_ckpt_valid_blocks(sbi, segno, false)) in get_free_segment()
941 static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi, in __create_discard_cmd() argument
945 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __create_discard_cmd()
949 f2fs_bug_on(sbi, !len); in __create_discard_cmd()
973 static bool f2fs_check_discard_tree(struct f2fs_sb_info *sbi) in f2fs_check_discard_tree() argument
976 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in f2fs_check_discard_tree()
989 f2fs_info(sbi, "broken discard_rbtree, " in f2fs_check_discard_tree()
1001 static struct discard_cmd *__lookup_discard_cmd(struct f2fs_sb_info *sbi, in __lookup_discard_cmd() argument
1004 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __lookup_discard_cmd()
1093 static void __remove_discard_cmd(struct f2fs_sb_info *sbi, in __remove_discard_cmd() argument
1096 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __remove_discard_cmd()
1108 f2fs_bug_on(sbi, dc->ref); in __remove_discard_cmd()
1114 f2fs_info_ratelimited(sbi, in __remove_discard_cmd()
1137 static void __check_sit_bitmap(struct f2fs_sb_info *sbi, in __check_sit_bitmap() argument
1147 segno = GET_SEGNO(sbi, blk); in __check_sit_bitmap()
1148 sentry = get_seg_entry(sbi, segno); in __check_sit_bitmap()
1149 offset = GET_BLKOFF_FROM_SEG0(sbi, blk); in __check_sit_bitmap()
1151 if (end < START_BLOCK(sbi, segno + 1)) in __check_sit_bitmap()
1152 size = GET_BLKOFF_FROM_SEG0(sbi, end); in __check_sit_bitmap()
1154 size = BLKS_PER_SEG(sbi); in __check_sit_bitmap()
1157 f2fs_bug_on(sbi, offset != size); in __check_sit_bitmap()
1158 blk = START_BLOCK(sbi, segno + 1); in __check_sit_bitmap()
1163 static void __init_discard_policy(struct f2fs_sb_info *sbi, in __init_discard_policy() argument
1167 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __init_discard_policy()
1186 if (utilization(sbi) > dcc->discard_urgent_util) { in __init_discard_policy()
1207 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1212 static void __submit_zone_reset_cmd(struct f2fs_sb_info *sbi, in __submit_zone_reset_cmd() argument
1217 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __submit_zone_reset_cmd()
1237 __check_sit_bitmap(sbi, dc->di.lstart, dc->di.lstart + dc->di.len); in __submit_zone_reset_cmd()
1245 f2fs_update_iostat(sbi, NULL, FS_ZONE_RESET_IO, dc->di.len * F2FS_BLKSIZE); in __submit_zone_reset_cmd()
1250 static int __submit_discard_cmd(struct f2fs_sb_info *sbi, in __submit_discard_cmd() argument
1257 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __submit_discard_cmd()
1267 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) in __submit_discard_cmd()
1271 if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) { in __submit_discard_cmd()
1272 int devi = f2fs_bdev_index(sbi, bdev); in __submit_discard_cmd()
1277 if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) { in __submit_discard_cmd()
1278 __submit_zone_reset_cmd(sbi, dc, flag, in __submit_discard_cmd()
1319 if (time_to_inject(sbi, FAULT_DISCARD)) { in __submit_discard_cmd()
1336 f2fs_bug_on(sbi, !bio); in __submit_discard_cmd()
1355 __check_sit_bitmap(sbi, lstart, lstart + len); in __submit_discard_cmd()
1364 f2fs_update_iostat(sbi, NULL, FS_DISCARD_IO, len * F2FS_BLKSIZE); in __submit_discard_cmd()
1374 __update_discard_tree_range(sbi, bdev, lstart, start, len); in __submit_discard_cmd()
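
    Lines 1271-1278 (and the async path at 2009-2012) show how discard
    submission forks on zoned storage: a range lying in a sequential zone is
    reclaimed with a zone reset, while conventional zones and non-zoned
    devices take the regular discard-bio path. A small dispatch model:

        #include <stdbool.h>

        enum discard_kind { DISCARD_BIO, ZONE_RESET };

        /* Dispatch model for lines 1271-1278 and 2009-2012. */
        static enum discard_kind discard_dispatch(bool sb_blkzoned,
                                                  bool bdev_zoned, bool seq_zone)
        {
                if (sb_blkzoned && bdev_zoned && seq_zone)
                        return ZONE_RESET;
                return DISCARD_BIO;
        }
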
1379 static void __insert_discard_cmd(struct f2fs_sb_info *sbi, in __insert_discard_cmd() argument
1383 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __insert_discard_cmd()
1400 f2fs_bug_on(sbi, 1); in __insert_discard_cmd()
1404 dc = __create_discard_cmd(sbi, bdev, lstart, start, len); in __insert_discard_cmd()
1416 static void __punch_discard_cmd(struct f2fs_sb_info *sbi, in __punch_discard_cmd() argument
1419 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __punch_discard_cmd()
1424 __remove_discard_cmd(sbi, dc); in __punch_discard_cmd()
1439 __insert_discard_cmd(sbi, dc->bdev, blkaddr + 1, in __punch_discard_cmd()
1452 static void __update_discard_tree_range(struct f2fs_sb_info *sbi, in __update_discard_tree_range() argument
1456 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __update_discard_tree_range()
1521 __remove_discard_cmd(sbi, tdc); in __update_discard_tree_range()
1526 __insert_discard_cmd(sbi, bdev, in __update_discard_tree_range()
1539 static void __queue_zone_reset_cmd(struct f2fs_sb_info *sbi, in __queue_zone_reset_cmd() argument
1545 mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock); in __queue_zone_reset_cmd()
1546 __insert_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen); in __queue_zone_reset_cmd()
1547 mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock); in __queue_zone_reset_cmd()
1551 static void __queue_discard_cmd(struct f2fs_sb_info *sbi, in __queue_discard_cmd() argument
1561 if (f2fs_is_multi_device(sbi)) { in __queue_discard_cmd()
1562 int devi = f2fs_target_device_index(sbi, blkstart); in __queue_discard_cmd()
1566 mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock); in __queue_discard_cmd()
1567 __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen); in __queue_discard_cmd()
1568 mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock); in __queue_discard_cmd()
1571 static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi, in __issue_discard_cmd_orderly() argument
1574 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __issue_discard_cmd_orderly()
1596 if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) { in __issue_discard_cmd_orderly()
1602 err = __submit_discard_cmd(sbi, dpolicy, dc, issued); in __issue_discard_cmd_orderly()
1609 __remove_discard_cmd(sbi, dc); in __issue_discard_cmd_orderly()
1623 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1626 static int __issue_discard_cmd(struct f2fs_sb_info *sbi, in __issue_discard_cmd() argument
1629 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __issue_discard_cmd()
1637 f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT); in __issue_discard_cmd()
1643 f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT)) in __issue_discard_cmd()
1650 __issue_discard_cmd_orderly(sbi, dpolicy, &issued); in __issue_discard_cmd()
1660 f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi)); in __issue_discard_cmd()
1663 f2fs_bug_on(sbi, dc->state != D_PREP); in __issue_discard_cmd()
1666 f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT)) in __issue_discard_cmd()
1670 !is_idle(sbi, DISCARD_TIME)) { in __issue_discard_cmd()
1675 __submit_discard_cmd(sbi, dpolicy, dc, &issued); in __issue_discard_cmd()
1689 __wait_all_discard_cmd(sbi, dpolicy); in __issue_discard_cmd()
1699 static bool __drop_discard_cmd(struct f2fs_sb_info *sbi) in __drop_discard_cmd() argument
1701 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __drop_discard_cmd()
1711 f2fs_bug_on(sbi, dc->state != D_PREP); in __drop_discard_cmd()
1712 __remove_discard_cmd(sbi, dc); in __drop_discard_cmd()
1721 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi) in f2fs_drop_discard_cmd() argument
1723 __drop_discard_cmd(sbi); in f2fs_drop_discard_cmd()
1726 static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi, in __wait_one_discard_bio() argument
1729 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __wait_one_discard_bio()
1734 f2fs_bug_on(sbi, dc->state != D_DONE); in __wait_one_discard_bio()
1739 __remove_discard_cmd(sbi, dc); in __wait_one_discard_bio()
1746 static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi, in __wait_discard_cmd_range() argument
1750 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __wait_discard_cmd_range()
1770 __remove_discard_cmd(sbi, iter); in __wait_discard_cmd_range()
1780 trimmed += __wait_one_discard_bio(sbi, dc); in __wait_discard_cmd_range()
1787 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi, in __wait_all_discard_cmd() argument
1794 return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX); in __wait_all_discard_cmd()
1797 __init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, MIN_DISCARD_GRANULARITY); in __wait_all_discard_cmd()
1798 discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX); in __wait_all_discard_cmd()
1799 __init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, MIN_DISCARD_GRANULARITY); in __wait_all_discard_cmd()
1800 discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX); in __wait_all_discard_cmd()
1806 static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr) in f2fs_wait_discard_bio() argument
1808 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in f2fs_wait_discard_bio()
1813 dc = __lookup_discard_cmd(sbi, blkaddr); in f2fs_wait_discard_bio()
1815 if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) { in f2fs_wait_discard_bio()
1816 int devi = f2fs_bdev_index(sbi, dc->bdev); in f2fs_wait_discard_bio()
1823 if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) { in f2fs_wait_discard_bio()
1826 __submit_zone_reset_cmd(sbi, dc, REQ_SYNC, in f2fs_wait_discard_bio()
1831 __wait_one_discard_bio(sbi, dc); in f2fs_wait_discard_bio()
1838 __punch_discard_cmd(sbi, dc, blkaddr); in f2fs_wait_discard_bio()
1847 __wait_one_discard_bio(sbi, dc); in f2fs_wait_discard_bio()
1850 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi) in f2fs_stop_discard_thread() argument
1852 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in f2fs_stop_discard_thread()
1864 * @sbi: the f2fs_sb_info data for discard cmd to issue
1870 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi) in f2fs_issue_discard_timeout() argument
1872 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in f2fs_issue_discard_timeout()
1879 __init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT, in f2fs_issue_discard_timeout()
1881 __issue_discard_cmd(sbi, &dpolicy); in f2fs_issue_discard_timeout()
1882 dropped = __drop_discard_cmd(sbi); in f2fs_issue_discard_timeout()
1885 __wait_all_discard_cmd(sbi, NULL); in f2fs_issue_discard_timeout()
1887 f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt)); in f2fs_issue_discard_timeout()
1893 struct f2fs_sb_info *sbi = data; in issue_discard_thread() local
1894 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in issue_discard_thread()
1908 if (sbi->gc_mode == GC_URGENT_HIGH || in issue_discard_thread()
1909 !f2fs_available_free_memory(sbi, DISCARD_CACHE)) in issue_discard_thread()
1910 __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, in issue_discard_thread()
1913 __init_discard_policy(sbi, &dpolicy, DPOLICY_BG, in issue_discard_thread()
1921 __wait_all_discard_cmd(sbi, NULL); in issue_discard_thread()
1925 if (f2fs_readonly(sbi->sb)) in issue_discard_thread()
1929 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) || in issue_discard_thread()
1935 sb_start_intwrite(sbi->sb); in issue_discard_thread()
1937 issued = __issue_discard_cmd(sbi, &dpolicy); in issue_discard_thread()
1939 __wait_all_discard_cmd(sbi, &dpolicy); in issue_discard_thread()
1942 wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME); in issue_discard_thread()
1951 sb_end_intwrite(sbi->sb); in issue_discard_thread()
1958 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi, in __f2fs_issue_discard_zone() argument
1966 if (f2fs_is_multi_device(sbi)) { in __f2fs_issue_discard_zone()
1967 devi = f2fs_target_device_index(sbi, blkstart); in __f2fs_issue_discard_zone()
1970 f2fs_err(sbi, "Invalid block %x", blkstart); in __f2fs_issue_discard_zone()
1977 if (f2fs_blkz_is_seq(sbi, devi, blkstart)) { in __f2fs_issue_discard_zone()
1983 f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)", in __f2fs_issue_discard_zone()
1984 devi, sbi->s_ndevs ? FDEV(devi).path : "", in __f2fs_issue_discard_zone()
1989 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) { in __f2fs_issue_discard_zone()
1995 __queue_zone_reset_cmd(sbi, bdev, blkstart, lblkstart, blklen); in __f2fs_issue_discard_zone()
2000 __queue_discard_cmd(sbi, bdev, lblkstart, blklen); in __f2fs_issue_discard_zone()
2005 static int __issue_discard_async(struct f2fs_sb_info *sbi, in __issue_discard_async() argument
2009 if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) in __issue_discard_async()
2010 return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen); in __issue_discard_async()
2012 __queue_discard_cmd(sbi, bdev, blkstart, blklen); in __issue_discard_async()
2016 static int f2fs_issue_discard(struct f2fs_sb_info *sbi, in f2fs_issue_discard() argument
2026 bdev = f2fs_target_device(sbi, blkstart, NULL); in f2fs_issue_discard()
2031 f2fs_target_device(sbi, i, NULL); in f2fs_issue_discard()
2034 err = __issue_discard_async(sbi, bdev, in f2fs_issue_discard()
2044 se = get_seg_entry(sbi, GET_SEGNO(sbi, i)); in f2fs_issue_discard()
2045 offset = GET_BLKOFF_FROM_SEG0(sbi, i); in f2fs_issue_discard()
2047 if (f2fs_block_unit_discard(sbi) && in f2fs_issue_discard()
2049 sbi->discard_blks--; in f2fs_issue_discard()
2053 err = __issue_discard_async(sbi, bdev, start, len); in f2fs_issue_discard()
2057 static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc, in add_discard_addrs() argument
2061 struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start); in add_discard_addrs()
2065 unsigned long *dmap = SIT_I(sbi)->tmp_map; in add_discard_addrs()
2069 struct list_head *head = &SM_I(sbi)->dcc_info->entry_list; in add_discard_addrs()
2072 if (se->valid_blocks == BLKS_PER_SEG(sbi) || in add_discard_addrs()
2073 !f2fs_hw_support_discard(sbi) || in add_discard_addrs()
2074 !f2fs_block_unit_discard(sbi)) in add_discard_addrs()
2078 if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks || in add_discard_addrs()
2079 SM_I(sbi)->dcc_info->nr_discards >= in add_discard_addrs()
2080 SM_I(sbi)->dcc_info->max_discards) in add_discard_addrs()
2089 while (force || SM_I(sbi)->dcc_info->nr_discards <= in add_discard_addrs()
2090 SM_I(sbi)->dcc_info->max_discards) { in add_discard_addrs()
2091 start = __find_rev_next_bit(dmap, BLKS_PER_SEG(sbi), end + 1); in add_discard_addrs()
2092 if (start >= BLKS_PER_SEG(sbi)) in add_discard_addrs()
2096 BLKS_PER_SEG(sbi), start + 1); in add_discard_addrs()
2097 if (force && start && end != BLKS_PER_SEG(sbi) && in add_discard_addrs()
2107 de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start); in add_discard_addrs()
2114 SM_I(sbi)->dcc_info->nr_discards += end - start; in add_discard_addrs()
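
    The loop on lines 2089-2114 walks a candidate bitmap with
    find-next-bit/find-next-zero-bit pairs, turning each maximal run of set
    bits into one discard entry. A userspace sketch of that run extraction,
    with plain byte arrays replacing the kernel's bitmap helpers:

        #include <stdio.h>

        /* Run-extraction loop in the style of add_discard_addrs(),
         * lines 2091-2096: emit each maximal run [start, end) of set bits. */
        static void emit_discard_runs(const unsigned char *dmap,
                                      unsigned int nbits)
        {
                unsigned int start = 0, end;

                for (;;) {
                        while (start < nbits && !dmap[start])
                                start++;
                        if (start >= nbits)
                                break;
                        end = start;
                        while (end < nbits && dmap[end])
                                end++;
                        printf("discard candidate: [%u, %u)\n", start, end);
                        start = end + 1;
                }
        }
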
2125 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi) in f2fs_release_discard_addrs() argument
2127 struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list); in f2fs_release_discard_addrs()
2138 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) in set_prefree_as_free_segments() argument
2140 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in set_prefree_as_free_segments()
2144 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi)) in set_prefree_as_free_segments()
2145 __set_test_and_free(sbi, segno, false); in set_prefree_as_free_segments()
2149 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, in f2fs_clear_prefree_segments() argument
2152 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in f2fs_clear_prefree_segments()
2155 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in f2fs_clear_prefree_segments()
2160 bool section_alignment = F2FS_OPTION(sbi).discard_unit == in f2fs_clear_prefree_segments()
2163 if (f2fs_lfs_mode(sbi) && __is_large_section(sbi)) in f2fs_clear_prefree_segments()
2173 start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1); in f2fs_clear_prefree_segments()
2174 if (start >= MAIN_SEGS(sbi)) in f2fs_clear_prefree_segments()
2176 end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi), in f2fs_clear_prefree_segments()
2180 start = rounddown(start, SEGS_PER_SEC(sbi)); in f2fs_clear_prefree_segments()
2181 end = roundup(end, SEGS_PER_SEC(sbi)); in f2fs_clear_prefree_segments()
2189 if (!f2fs_realtime_discard_enable(sbi)) in f2fs_clear_prefree_segments()
2197 if (!f2fs_sb_has_blkzoned(sbi) && in f2fs_clear_prefree_segments()
2198 (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi))) { in f2fs_clear_prefree_segments()
2199 f2fs_issue_discard(sbi, START_BLOCK(sbi, start), in f2fs_clear_prefree_segments()
2200 (end - start) << sbi->log_blocks_per_seg); in f2fs_clear_prefree_segments()
2204 secno = GET_SEC_FROM_SEG(sbi, start); in f2fs_clear_prefree_segments()
2205 start_segno = GET_SEG_FROM_SEC(sbi, secno); in f2fs_clear_prefree_segments()
2206 if (!IS_CURSEC(sbi, secno) && in f2fs_clear_prefree_segments()
2207 !get_valid_blocks(sbi, start, true)) in f2fs_clear_prefree_segments()
2208 f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno), in f2fs_clear_prefree_segments()
2209 BLKS_PER_SEC(sbi)); in f2fs_clear_prefree_segments()
2211 start = start_segno + SEGS_PER_SEC(sbi); in f2fs_clear_prefree_segments()
2219 if (!f2fs_block_unit_discard(sbi)) in f2fs_clear_prefree_segments()
2230 BLKS_PER_SEG(sbi), cur_pos); in f2fs_clear_prefree_segments()
2233 if (f2fs_sb_has_blkzoned(sbi) || in f2fs_clear_prefree_segments()
2237 f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos, in f2fs_clear_prefree_segments()
2242 BLKS_PER_SEG(sbi), cur_pos); in f2fs_clear_prefree_segments()
2248 if (cur_pos < BLKS_PER_SEG(sbi)) in f2fs_clear_prefree_segments()
2256 wake_up_discard_thread(sbi, false); in f2fs_clear_prefree_segments()
2259 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi) in f2fs_start_discard_thread() argument
2261 dev_t dev = sbi->sb->s_bdev->bd_dev; in f2fs_start_discard_thread()
2262 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in f2fs_start_discard_thread()
2265 if (!f2fs_realtime_discard_enable(sbi)) in f2fs_start_discard_thread()
2268 dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi, in f2fs_start_discard_thread()
2278 static int create_discard_cmd_control(struct f2fs_sb_info *sbi) in create_discard_cmd_control() argument
2283 if (SM_I(sbi)->dcc_info) { in create_discard_cmd_control()
2284 dcc = SM_I(sbi)->dcc_info; in create_discard_cmd_control()
2288 dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL); in create_discard_cmd_control()
2295 if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT) in create_discard_cmd_control()
2296 dcc->discard_granularity = BLKS_PER_SEG(sbi); in create_discard_cmd_control()
2297 else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION) in create_discard_cmd_control()
2298 dcc->discard_granularity = BLKS_PER_SEC(sbi); in create_discard_cmd_control()
2310 dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg; in create_discard_cmd_control()
2322 SM_I(sbi)->dcc_info = dcc; in create_discard_cmd_control()
2324 err = f2fs_start_discard_thread(sbi); in create_discard_cmd_control()
2327 SM_I(sbi)->dcc_info = NULL; in create_discard_cmd_control()
2333 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi) in destroy_discard_cmd_control() argument
2335 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in destroy_discard_cmd_control()
2340 f2fs_stop_discard_thread(sbi); in destroy_discard_cmd_control()
2346 f2fs_issue_discard_timeout(sbi); in destroy_discard_cmd_control()
2349 SM_I(sbi)->dcc_info = NULL; in destroy_discard_cmd_control()
2352 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno) in __mark_sit_entry_dirty() argument
2354 struct sit_info *sit_i = SIT_I(sbi); in __mark_sit_entry_dirty()
2364 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type, in __set_sit_entry_type() argument
2367 struct seg_entry *se = get_seg_entry(sbi, segno); in __set_sit_entry_type()
2371 __mark_sit_entry_dirty(sbi, segno); in __set_sit_entry_type()
2374 static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi, in get_segment_mtime() argument
2377 unsigned int segno = GET_SEGNO(sbi, blkaddr); in get_segment_mtime()
2381 return get_seg_entry(sbi, segno)->mtime; in get_segment_mtime()
2384 static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr, in update_segment_mtime() argument
2388 unsigned int segno = GET_SEGNO(sbi, blkaddr); in update_segment_mtime()
2389 unsigned long long ctime = get_mtime(sbi, false); in update_segment_mtime()
2395 se = get_seg_entry(sbi, segno); in update_segment_mtime()
2403 if (ctime > SIT_I(sbi)->max_mtime) in update_segment_mtime()
2404 SIT_I(sbi)->max_mtime = ctime; in update_segment_mtime()
2407 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) in update_sit_entry() argument
2417 segno = GET_SEGNO(sbi, blkaddr); in update_sit_entry()
2421 se = get_seg_entry(sbi, segno); in update_sit_entry()
2423 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); in update_sit_entry()
2425 f2fs_bug_on(sbi, (new_vblocks < 0 || in update_sit_entry()
2426 (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno)))); in update_sit_entry()
2437 f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d", in update_sit_entry()
2439 f2fs_bug_on(sbi, 1); in update_sit_entry()
2443 f2fs_err(sbi, "Bitmap was wrongly set, blk:%u", in update_sit_entry()
2445 f2fs_bug_on(sbi, 1); in update_sit_entry()
2450 if (f2fs_block_unit_discard(sbi) && in update_sit_entry()
2452 sbi->discard_blks--; in update_sit_entry()
2458 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) { in update_sit_entry()
2468 f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d", in update_sit_entry()
2470 f2fs_bug_on(sbi, 1); in update_sit_entry()
2474 f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", in update_sit_entry()
2476 f2fs_bug_on(sbi, 1); in update_sit_entry()
2479 } else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { in update_sit_entry()
2487 spin_lock(&sbi->stat_lock); in update_sit_entry()
2488 sbi->unusable_block_count++; in update_sit_entry()
2489 spin_unlock(&sbi->stat_lock); in update_sit_entry()
2493 if (f2fs_block_unit_discard(sbi) && in update_sit_entry()
2495 sbi->discard_blks++; in update_sit_entry()
2500 __mark_sit_entry_dirty(sbi, segno); in update_sit_entry()
2503 SIT_I(sbi)->written_valid_blocks += del; in update_sit_entry()
2505 if (__is_large_section(sbi)) in update_sit_entry()
2506 get_sec_entry(sbi, segno)->valid_blocks += del; in update_sit_entry()
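
    Lines 2425-2445 enforce the SIT bitmap discipline: allocating a block
    (del > 0) must find its bit clear, invalidating it (del < 0) must find
    the bit set, and any other state is the inconsistency the kernel reports
    via f2fs_err()/f2fs_bug_on(). A toy model of that update:

        #include <stdbool.h>
        #include <stdio.h>

        /* Toy model of update_sit_entry()'s bitmap checks, lines 2425-2445. */
        static bool update_valid_bit(unsigned char *map, unsigned int off,
                                     int del)
        {
                if (del > 0) {          /* block becomes valid */
                        if (map[off]) {
                                fprintf(stderr, "bit %u wrongly set\n", off);
                                return false;
                        }
                        map[off] = 1;
                } else {                /* block invalidated */
                        if (!map[off]) {
                                fprintf(stderr, "bit %u wrongly cleared\n", off);
                                return false;
                        }
                        map[off] = 0;
                }
                return true;
        }
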
2509 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) in f2fs_invalidate_blocks() argument
2511 unsigned int segno = GET_SEGNO(sbi, addr); in f2fs_invalidate_blocks()
2512 struct sit_info *sit_i = SIT_I(sbi); in f2fs_invalidate_blocks()
2514 f2fs_bug_on(sbi, addr == NULL_ADDR); in f2fs_invalidate_blocks()
2518 f2fs_invalidate_internal_cache(sbi, addr); in f2fs_invalidate_blocks()
2523 update_segment_mtime(sbi, addr, 0); in f2fs_invalidate_blocks()
2524 update_sit_entry(sbi, addr, -1); in f2fs_invalidate_blocks()
2527 locate_dirty_segment(sbi, segno); in f2fs_invalidate_blocks()
2532 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr) in f2fs_is_checkpointed_data() argument
2534 struct sit_info *sit_i = SIT_I(sbi); in f2fs_is_checkpointed_data()
2544 segno = GET_SEGNO(sbi, blkaddr); in f2fs_is_checkpointed_data()
2545 se = get_seg_entry(sbi, segno); in f2fs_is_checkpointed_data()
2546 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); in f2fs_is_checkpointed_data()
2556 static unsigned short f2fs_curseg_valid_blocks(struct f2fs_sb_info *sbi, int type) in f2fs_curseg_valid_blocks() argument
2558 struct curseg_info *curseg = CURSEG_I(sbi, type); in f2fs_curseg_valid_blocks()
2560 if (sbi->ckpt->alloc_type[type] == SSR) in f2fs_curseg_valid_blocks()
2561 return BLKS_PER_SEG(sbi); in f2fs_curseg_valid_blocks()
2568 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra) in f2fs_npages_for_summary_flush() argument
2574 if (sbi->ckpt->alloc_type[i] != SSR && for_ra) in f2fs_npages_for_summary_flush()
2576 le16_to_cpu(F2FS_CKPT(sbi)->cur_data_blkoff[i]); in f2fs_npages_for_summary_flush()
2578 valid_sum_count += f2fs_curseg_valid_blocks(sbi, i); in f2fs_npages_for_summary_flush()
2594 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) in f2fs_get_sum_page() argument
2596 if (unlikely(f2fs_cp_error(sbi))) in f2fs_get_sum_page()
2598 return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno)); in f2fs_get_sum_page()
2601 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, in f2fs_update_meta_page() argument
2604 struct page *page = f2fs_grab_meta_page(sbi, blk_addr); in f2fs_update_meta_page()
2611 static void write_sum_page(struct f2fs_sb_info *sbi, in write_sum_page() argument
2614 f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr); in write_sum_page()
2617 static void write_current_sum_page(struct f2fs_sb_info *sbi, in write_current_sum_page() argument
2620 struct curseg_info *curseg = CURSEG_I(sbi, type); in write_current_sum_page()
2621 struct page *page = f2fs_grab_meta_page(sbi, blk_addr); in write_current_sum_page()
2643 static int is_next_segment_free(struct f2fs_sb_info *sbi, in is_next_segment_free() argument
2647 struct free_segmap_info *free_i = FREE_I(sbi); in is_next_segment_free()
2649 if (segno < MAIN_SEGS(sbi) && segno % SEGS_PER_SEC(sbi)) in is_next_segment_free()
2658 static void get_new_segment(struct f2fs_sb_info *sbi, in get_new_segment() argument
2661 struct free_segmap_info *free_i = FREE_I(sbi); in get_new_segment()
2663 unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone; in get_new_segment()
2664 unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg); in get_new_segment()
2665 unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg); in get_new_segment()
2672 if (!new_sec && ((*newseg + 1) % SEGS_PER_SEC(sbi))) { in get_new_segment()
2674 GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1); in get_new_segment()
2675 if (segno < GET_SEG_FROM_SEC(sbi, hint + 1)) in get_new_segment()
2683 if (f2fs_sb_has_blkzoned(sbi)) { in get_new_segment()
2684 segno = pinning ? 0 : max(first_zoned_segno(sbi), *newseg); in get_new_segment()
2685 hint = GET_SEC_FROM_SEG(sbi, segno); in get_new_segment()
2689 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint); in get_new_segment()
2690 if (secno >= MAIN_SECS(sbi)) { in get_new_segment()
2692 MAIN_SECS(sbi)); in get_new_segment()
2693 if (secno >= MAIN_SECS(sbi)) { in get_new_segment()
2698 segno = GET_SEG_FROM_SEC(sbi, secno); in get_new_segment()
2699 zoneno = GET_ZONE_FROM_SEC(sbi, secno); in get_new_segment()
2704 if (sbi->secs_per_zone == 1) in get_new_segment()
2709 if (CURSEG_I(sbi, i)->zone == zoneno) in get_new_segment()
2717 hint = (zoneno + 1) * sbi->secs_per_zone; in get_new_segment()
2723 f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap)); in get_new_segment()
2724 __set_inuse(sbi, segno); in get_new_segment()
2730 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT); in get_new_segment()
2731 f2fs_bug_on(sbi, 1); in get_new_segment()
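
    Lines 2689-2698 search the free-section bitmap from a hint, wrap to the
    start once, and give up only when no section is free, at which point
    line 2730 stops checkpointing with STOP_CP_REASON_NO_SEGMENT. A
    userspace sketch of that search over a byte-per-section map:

        /* Free-section search in the style of get_new_segment(),
         * lines 2689-2698. Returns a section index, or -1 if none free. */
        static int find_free_section(const unsigned char *secmap,
                                     unsigned int nsecs, unsigned int hint)
        {
                unsigned int i;

                for (i = hint; i < nsecs; i++)
                        if (!secmap[i])
                                return (int)i;
                for (i = 0; i < hint && i < nsecs; i++)
                        if (!secmap[i])
                                return (int)i;
                return -1;
        }
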
2735 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified) in reset_curseg() argument
2737 struct curseg_info *curseg = CURSEG_I(sbi, type); in reset_curseg()
2743 curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno); in reset_curseg()
2750 sanity_check_seg_type(sbi, seg_type); in reset_curseg()
2756 __set_sit_entry_type(sbi, seg_type, curseg->segno, modified); in reset_curseg()
2759 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type) in __get_next_segno() argument
2761 struct curseg_info *curseg = CURSEG_I(sbi, type); in __get_next_segno()
2764 sanity_check_seg_type(sbi, seg_type); in __get_next_segno()
2765 if (f2fs_need_rand_seg(sbi)) in __get_next_segno()
2766 return get_random_u32_below(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi)); in __get_next_segno()
2768 if (__is_large_section(sbi)) in __get_next_segno()
2775 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) in __get_next_segno()
2781 if (SIT_I(sbi)->last_victim[ALLOC_NEXT]) in __get_next_segno()
2782 return SIT_I(sbi)->last_victim[ALLOC_NEXT]; in __get_next_segno()
2785 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE) in __get_next_segno()
2795 static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec) in new_curseg() argument
2797 struct curseg_info *curseg = CURSEG_I(sbi, type); in new_curseg()
2802 write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, segno)); in new_curseg()
2804 segno = __get_next_segno(sbi, type); in new_curseg()
2805 get_new_segment(sbi, &segno, new_sec, pinning); in new_curseg()
2807 !f2fs_valid_pinned_area(sbi, START_BLOCK(sbi, segno))) { in new_curseg()
2808 __set_free(sbi, segno); in new_curseg()
2813 reset_curseg(sbi, type, 1); in new_curseg()
2815 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) in new_curseg()
2817 get_random_u32_inclusive(1, sbi->max_fragment_chunk); in new_curseg()
2821 static int __next_free_blkoff(struct f2fs_sb_info *sbi, in __next_free_blkoff() argument
2824 struct seg_entry *se = get_seg_entry(sbi, segno); in __next_free_blkoff()
2826 unsigned long *target_map = SIT_I(sbi)->tmp_map; in __next_free_blkoff()
2834 return __find_rev_next_zero_bit(target_map, BLKS_PER_SEG(sbi), start); in __next_free_blkoff()
2837 static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi, in f2fs_find_next_ssr_block() argument
2840 return __next_free_blkoff(sbi, seg->segno, seg->next_blkoff + 1); in f2fs_find_next_ssr_block()
2843 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno) in f2fs_segment_has_free_slot() argument
2845 return __next_free_blkoff(sbi, segno, 0) < BLKS_PER_SEG(sbi); in f2fs_segment_has_free_slot()
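
    Lines 2824-2834 build the SSR scan map: a slot is reusable only if it is
    free in both the current validity bitmap and the last-checkpoint bitmap,
    so the scan runs over their OR; line 2845 then reduces "has a free slot"
    to "the first such offset is inside the segment". A sketch:

        /* Model of __next_free_blkoff(), lines 2824-2834. Returns blks
         * when the segment has no SSR-reusable slot left. */
        static unsigned int next_free_blkoff(const unsigned char *cur_map,
                                             const unsigned char *ckpt_map,
                                             unsigned int blks,
                                             unsigned int start)
        {
                unsigned int off;

                for (off = start; off < blks; off++)
                        if (!(cur_map[off] | ckpt_map[off]))
                                return off;
                return blks;
        }

    f2fs_segment_has_free_slot() is then next_free_blkoff(..., 0) < blks.
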
2852 static void change_curseg(struct f2fs_sb_info *sbi, int type) in change_curseg() argument
2854 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in change_curseg()
2855 struct curseg_info *curseg = CURSEG_I(sbi, type); in change_curseg()
2861 write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno)); in change_curseg()
2863 __set_test_and_inuse(sbi, new_segno); in change_curseg()
2866 __remove_dirty_segment(sbi, new_segno, PRE); in change_curseg()
2867 __remove_dirty_segment(sbi, new_segno, DIRTY); in change_curseg()
2870 reset_curseg(sbi, type, 1); in change_curseg()
2872 curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0); in change_curseg()
2874 sum_page = f2fs_get_sum_page(sbi, new_segno); in change_curseg()
2885 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2888 static void get_atssr_segment(struct f2fs_sb_info *sbi, int type, in get_atssr_segment() argument
2892 struct curseg_info *curseg = CURSEG_I(sbi, type); in get_atssr_segment()
2896 if (get_ssr_segment(sbi, type, alloc_mode, age)) { in get_atssr_segment()
2897 struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno); in get_atssr_segment()
2900 change_curseg(sbi, type); in get_atssr_segment()
2904 new_curseg(sbi, type, true); in get_atssr_segment()
2906 stat_inc_seg_type(sbi, curseg); in get_atssr_segment()
2909 static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi) in __f2fs_init_atgc_curseg() argument
2911 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC); in __f2fs_init_atgc_curseg()
2913 if (!sbi->am.atgc_enabled) in __f2fs_init_atgc_curseg()
2916 f2fs_down_read(&SM_I(sbi)->curseg_lock); in __f2fs_init_atgc_curseg()
2919 down_write(&SIT_I(sbi)->sentry_lock); in __f2fs_init_atgc_curseg()
2921 get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0); in __f2fs_init_atgc_curseg()
2923 up_write(&SIT_I(sbi)->sentry_lock); in __f2fs_init_atgc_curseg()
2926 f2fs_up_read(&SM_I(sbi)->curseg_lock); in __f2fs_init_atgc_curseg()
2929 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi) in f2fs_init_inmem_curseg() argument
2931 __f2fs_init_atgc_curseg(sbi); in f2fs_init_inmem_curseg()
2934 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type) in __f2fs_save_inmem_curseg() argument
2936 struct curseg_info *curseg = CURSEG_I(sbi, type); in __f2fs_save_inmem_curseg()
2942 if (get_valid_blocks(sbi, curseg->segno, false)) { in __f2fs_save_inmem_curseg()
2943 write_sum_page(sbi, curseg->sum_blk, in __f2fs_save_inmem_curseg()
2944 GET_SUM_BLOCK(sbi, curseg->segno)); in __f2fs_save_inmem_curseg()
2946 mutex_lock(&DIRTY_I(sbi)->seglist_lock); in __f2fs_save_inmem_curseg()
2947 __set_test_and_free(sbi, curseg->segno, true); in __f2fs_save_inmem_curseg()
2948 mutex_unlock(&DIRTY_I(sbi)->seglist_lock); in __f2fs_save_inmem_curseg()
2954 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi) in f2fs_save_inmem_curseg() argument
2956 __f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED); in f2fs_save_inmem_curseg()
2958 if (sbi->am.atgc_enabled) in f2fs_save_inmem_curseg()
2959 __f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC); in f2fs_save_inmem_curseg()
2962 static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type) in __f2fs_restore_inmem_curseg() argument
2964 struct curseg_info *curseg = CURSEG_I(sbi, type); in __f2fs_restore_inmem_curseg()
2969 if (get_valid_blocks(sbi, curseg->segno, false)) in __f2fs_restore_inmem_curseg()
2972 mutex_lock(&DIRTY_I(sbi)->seglist_lock); in __f2fs_restore_inmem_curseg()
2973 __set_test_and_inuse(sbi, curseg->segno); in __f2fs_restore_inmem_curseg()
2974 mutex_unlock(&DIRTY_I(sbi)->seglist_lock); in __f2fs_restore_inmem_curseg()
2979 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi) in f2fs_restore_inmem_curseg() argument
2981 __f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED); in f2fs_restore_inmem_curseg()
2983 if (sbi->am.atgc_enabled) in f2fs_restore_inmem_curseg()
2984 __f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC); in f2fs_restore_inmem_curseg()
2987 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type, in get_ssr_segment() argument
2990 struct curseg_info *curseg = CURSEG_I(sbi, type); in get_ssr_segment()
2996 sanity_check_seg_type(sbi, seg_type); in get_ssr_segment()
2999 if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) { in get_ssr_segment()
3026 if (!f2fs_get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) { in get_ssr_segment()
3033 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { in get_ssr_segment()
3034 segno = get_free_segment(sbi); in get_ssr_segment()
3043 static bool need_new_seg(struct f2fs_sb_info *sbi, int type) in need_new_seg() argument
3045 struct curseg_info *curseg = CURSEG_I(sbi, type); in need_new_seg()
3047 if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) && in need_new_seg()
3051 is_next_segment_free(sbi, curseg, type) && in need_new_seg()
3052 likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED))) in need_new_seg()
3054 if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0)) in need_new_seg()
3059 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, in f2fs_allocate_segment_for_resize() argument
3062 struct curseg_info *curseg = CURSEG_I(sbi, type); in f2fs_allocate_segment_for_resize()
3065 f2fs_down_read(&SM_I(sbi)->curseg_lock); in f2fs_allocate_segment_for_resize()
3067 down_write(&SIT_I(sbi)->sentry_lock); in f2fs_allocate_segment_for_resize()
3069 segno = CURSEG_I(sbi, type)->segno; in f2fs_allocate_segment_for_resize()
3073 if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0)) in f2fs_allocate_segment_for_resize()
3074 change_curseg(sbi, type); in f2fs_allocate_segment_for_resize()
3076 new_curseg(sbi, type, true); in f2fs_allocate_segment_for_resize()
3078 stat_inc_seg_type(sbi, curseg); in f2fs_allocate_segment_for_resize()
3080 locate_dirty_segment(sbi, segno); in f2fs_allocate_segment_for_resize()
3082 up_write(&SIT_I(sbi)->sentry_lock); in f2fs_allocate_segment_for_resize()
3085 f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u", in f2fs_allocate_segment_for_resize()
3089 f2fs_up_read(&SM_I(sbi)->curseg_lock); in f2fs_allocate_segment_for_resize()
3092 static int __allocate_new_segment(struct f2fs_sb_info *sbi, int type, in __allocate_new_segment() argument
3095 struct curseg_info *curseg = CURSEG_I(sbi, type); in __allocate_new_segment()
3100 !get_valid_blocks(sbi, curseg->segno, new_sec) && in __allocate_new_segment()
3101 !get_ckpt_valid_blocks(sbi, curseg->segno, new_sec)) in __allocate_new_segment()
3105 if (new_curseg(sbi, type, true)) in __allocate_new_segment()
3107 stat_inc_seg_type(sbi, curseg); in __allocate_new_segment()
3108 locate_dirty_segment(sbi, old_segno); in __allocate_new_segment()
3112 int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force) in f2fs_allocate_new_section() argument
3116 f2fs_down_read(&SM_I(sbi)->curseg_lock); in f2fs_allocate_new_section()
3117 down_write(&SIT_I(sbi)->sentry_lock); in f2fs_allocate_new_section()
3118 ret = __allocate_new_segment(sbi, type, true, force); in f2fs_allocate_new_section()
3119 up_write(&SIT_I(sbi)->sentry_lock); in f2fs_allocate_new_section()
3120 f2fs_up_read(&SM_I(sbi)->curseg_lock); in f2fs_allocate_new_section()
3125 int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi) in f2fs_allocate_pinning_section() argument
3131 f2fs_lock_op(sbi); in f2fs_allocate_pinning_section()
3132 err = f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false); in f2fs_allocate_pinning_section()
3133 f2fs_unlock_op(sbi); in f2fs_allocate_pinning_section()
3135 if (f2fs_sb_has_blkzoned(sbi) && err && gc_required) { in f2fs_allocate_pinning_section()
3136 f2fs_down_write(&sbi->gc_lock); in f2fs_allocate_pinning_section()
3137 f2fs_gc_range(sbi, 0, GET_SEGNO(sbi, FDEV(0).end_blk), true, 1); in f2fs_allocate_pinning_section()
3138 f2fs_up_write(&sbi->gc_lock); in f2fs_allocate_pinning_section()
3147 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi) in f2fs_allocate_new_segments() argument
3151 f2fs_down_read(&SM_I(sbi)->curseg_lock); in f2fs_allocate_new_segments()
3152 down_write(&SIT_I(sbi)->sentry_lock); in f2fs_allocate_new_segments()
3154 __allocate_new_segment(sbi, i, false, false); in f2fs_allocate_new_segments()
3155 up_write(&SIT_I(sbi)->sentry_lock); in f2fs_allocate_new_segments()
3156 f2fs_up_read(&SM_I(sbi)->curseg_lock); in f2fs_allocate_new_segments()
3159 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, in f2fs_exist_trim_candidates() argument
3165 down_write(&SIT_I(sbi)->sentry_lock); in f2fs_exist_trim_candidates()
3167 if (add_discard_addrs(sbi, cpc, true)) { in f2fs_exist_trim_candidates()
3172 up_write(&SIT_I(sbi)->sentry_lock); in f2fs_exist_trim_candidates()
3178 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi, in __issue_discard_cmd_range() argument
3182 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __issue_discard_cmd_range()
3195 f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi)); in __issue_discard_cmd_range()
3216 err = __submit_discard_cmd(sbi, dpolicy, dc, &issued); in __issue_discard_cmd_range()
3222 __remove_discard_cmd(sbi, dc); in __issue_discard_cmd_range()
3226 trimmed += __wait_all_discard_cmd(sbi, NULL); in __issue_discard_cmd_range()
3233 __remove_discard_cmd(sbi, dc); in __issue_discard_cmd_range()
3246 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) in f2fs_trim_fs() argument
3256 bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi); in f2fs_trim_fs()
3258 if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize) in f2fs_trim_fs()
3261 if (end < MAIN_BLKADDR(sbi)) in f2fs_trim_fs()
3264 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { in f2fs_trim_fs()
3265 f2fs_warn(sbi, "Found FS corruption, run fsck to fix."); in f2fs_trim_fs()
3270 start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start); in f2fs_trim_fs()
3271 end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 : in f2fs_trim_fs()
3272 GET_SEGNO(sbi, end); in f2fs_trim_fs()
3274 start_segno = rounddown(start_segno, SEGS_PER_SEC(sbi)); in f2fs_trim_fs()
3275 end_segno = roundup(end_segno + 1, SEGS_PER_SEC(sbi)) - 1; in f2fs_trim_fs()
3283 if (sbi->discard_blks == 0) in f2fs_trim_fs()
3286 f2fs_down_write(&sbi->gc_lock); in f2fs_trim_fs()
3287 stat_inc_cp_call_count(sbi, TOTAL_CALL); in f2fs_trim_fs()
3288 err = f2fs_write_checkpoint(sbi, &cpc); in f2fs_trim_fs()
3289 f2fs_up_write(&sbi->gc_lock); in f2fs_trim_fs()
3299 if (f2fs_realtime_discard_enable(sbi)) in f2fs_trim_fs()
3302 start_block = START_BLOCK(sbi, start_segno); in f2fs_trim_fs()
3303 end_block = START_BLOCK(sbi, end_segno + 1); in f2fs_trim_fs()
3305 __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen); in f2fs_trim_fs()
3306 trimmed = __issue_discard_cmd_range(sbi, &dpolicy, in f2fs_trim_fs()
3309 trimmed += __wait_discard_cmd_range(sbi, &dpolicy, in f2fs_trim_fs()
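
    Lines 3270-3275 clamp the user-supplied trim range to main-area segments
    and, in LFS mode on large sections, widen it to whole sections. A sketch
    of the alignment step only, expanding the rounddown/roundup arithmetic:

        #include <stdbool.h>

        /* Alignment step of f2fs_trim_fs(), lines 3274-3275: widen
         * [start_segno, end_segno] to section boundaries when needed. */
        static void align_trim_range(unsigned int *start_segno,
                                     unsigned int *end_segno,
                                     unsigned int segs_per_sec,
                                     bool need_align)
        {
                if (!need_align)
                        return;
                *start_segno -= *start_segno % segs_per_sec;     /* rounddown */
                *end_segno = *end_segno - (*end_segno % segs_per_sec)
                             + segs_per_sec - 1;  /* roundup(end + 1) - 1 */
        }
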
3356 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in __get_age_segment_type() local
3362 if (ei.age <= sbi->hot_data_age_threshold) in __get_age_segment_type()
3364 if (ei.age <= sbi->warm_data_age_threshold) in __get_age_segment_type()
3381 if (fio->sbi->am.atgc_enabled && in __get_segment_type_6()
3383 (fio->sbi->gc_mode != GC_URGENT_HIGH) && in __get_segment_type_6()
3414 switch (F2FS_OPTION(fio->sbi).active_logs) { in __get_segment_type()
3425 f2fs_bug_on(fio->sbi, true); in __get_segment_type()
3437 static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi, in f2fs_randomize_chunk() argument
3445 get_random_u32_inclusive(1, sbi->max_fragment_chunk); in f2fs_randomize_chunk()
3447 get_random_u32_inclusive(1, sbi->max_fragment_hole); in f2fs_randomize_chunk()
3450 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, in f2fs_allocate_data_block() argument
3455 struct sit_info *sit_i = SIT_I(sbi); in f2fs_allocate_data_block()
3456 struct curseg_info *curseg = CURSEG_I(sbi, type); in f2fs_allocate_data_block()
3462 f2fs_down_read(&SM_I(sbi)->curseg_lock); in f2fs_allocate_data_block()
3468 f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO); in f2fs_allocate_data_block()
3469 se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr)); in f2fs_allocate_data_block()
3470 sanity_check_seg_type(sbi, se->type); in f2fs_allocate_data_block()
3471 f2fs_bug_on(sbi, IS_NODESEG(se->type)); in f2fs_allocate_data_block()
3473 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); in f2fs_allocate_data_block()
3475 f2fs_bug_on(sbi, curseg->next_blkoff >= BLKS_PER_SEG(sbi)); in f2fs_allocate_data_block()
3477 f2fs_wait_discard_bio(sbi, *new_blkaddr); in f2fs_allocate_data_block()
3481 curseg->next_blkoff = f2fs_find_next_ssr_block(sbi, curseg); in f2fs_allocate_data_block()
3484 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) in f2fs_allocate_data_block()
3485 f2fs_randomize_chunk(sbi, curseg); in f2fs_allocate_data_block()
3487 if (curseg->next_blkoff >= f2fs_usable_blks_in_seg(sbi, curseg->segno)) in f2fs_allocate_data_block()
3489 stat_inc_block_count(sbi, curseg); in f2fs_allocate_data_block()
3492 old_mtime = get_segment_mtime(sbi, old_blkaddr); in f2fs_allocate_data_block()
3494 update_segment_mtime(sbi, old_blkaddr, 0); in f2fs_allocate_data_block()
3497 update_segment_mtime(sbi, *new_blkaddr, old_mtime); in f2fs_allocate_data_block()
3503 update_sit_entry(sbi, *new_blkaddr, 1); in f2fs_allocate_data_block()
3504 update_sit_entry(sbi, old_blkaddr, -1); in f2fs_allocate_data_block()
3512 !((curseg->segno + 1) % sbi->segs_per_sec)) { in f2fs_allocate_data_block()
3513 write_sum_page(sbi, curseg->sum_blk, in f2fs_allocate_data_block()
3514 GET_SUM_BLOCK(sbi, curseg->segno)); in f2fs_allocate_data_block()
3519 get_atssr_segment(sbi, type, se->type, in f2fs_allocate_data_block()
3522 if (need_new_seg(sbi, type)) in f2fs_allocate_data_block()
3523 new_curseg(sbi, type, false); in f2fs_allocate_data_block()
3525 change_curseg(sbi, type); in f2fs_allocate_data_block()
3526 stat_inc_seg_type(sbi, curseg); in f2fs_allocate_data_block()
3536 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); in f2fs_allocate_data_block()
3537 locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr)); in f2fs_allocate_data_block()
3540 atomic64_inc(&sbi->allocated_data_blocks); in f2fs_allocate_data_block()
3545 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg)); in f2fs_allocate_data_block()
3547 f2fs_inode_chksum_set(sbi, page); in f2fs_allocate_data_block()
3555 io = sbi->write_io[fio->type] + fio->temp; in f2fs_allocate_data_block()
3563 f2fs_up_read(&SM_I(sbi)->curseg_lock); in f2fs_allocate_data_block()
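The f2fs_allocate_data_block() hits show the core allocate-and-account order: take NEXT_FREE_BLKADDR, advance next_blkoff, then update_sit_entry() with +1 on the new address and -1 on the old one. A simplified standalone model under assumed toy types; it covers only the LFS sequential case, whereas SSR would probe for the next free offset:

#define MODEL_BLKS_PER_SEG 512u
#define MODEL_NULL_SEGNO (~0u)

struct model_seg { unsigned char valid[MODEL_BLKS_PER_SEG]; };
struct model_curseg { unsigned int segno; unsigned int next_blkoff; };

static unsigned int model_alloc(struct model_curseg *cur,
				struct model_seg *segs,
				unsigned int old_segno,
				unsigned int old_blkoff)
{
	unsigned int new_blkaddr =
		cur->segno * MODEL_BLKS_PER_SEG + cur->next_blkoff;

	segs[cur->segno].valid[cur->next_blkoff] = 1;	/* new block: +1 */
	cur->next_blkoff++;				/* LFS: strictly sequential */

	if (old_segno != MODEL_NULL_SEGNO)		/* rewrite: old copy dies */
		segs[old_segno].valid[old_blkoff] = 0;	/* old block: -1 */

	return new_blkaddr;
}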
3566 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino, in f2fs_update_device_state() argument
3569 if (!f2fs_is_multi_device(sbi)) in f2fs_update_device_state()
3573 unsigned int devidx = f2fs_target_device_index(sbi, blkaddr); in f2fs_update_device_state()
3577 f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO); in f2fs_update_device_state()
3580 if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) { in f2fs_update_device_state()
3581 spin_lock(&sbi->dev_lock); in f2fs_update_device_state()
3582 f2fs_set_bit(devidx, (char *)&sbi->dirty_device); in f2fs_update_device_state()
3583 spin_unlock(&sbi->dev_lock); in f2fs_update_device_state()
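f2fs_update_device_state() tests sbi->dirty_device without the lock and takes dev_lock only when the bit actually needs setting. The same check-then-lock pattern, sketched with pthreads; this is illustrative, not kernel code, and a missed concurrent set merely costs one redundant lock round-trip:

#include <pthread.h>

struct dev_state {
	unsigned long dirty_bits;
	pthread_mutex_t lock;
};

static void mark_device_dirty(struct dev_state *ds, unsigned int devidx)
{
	unsigned long bit = 1UL << devidx;

	if (ds->dirty_bits & bit)	/* cheap unlocked fast path */
		return;
	pthread_mutex_lock(&ds->lock);
	ds->dirty_bits |= bit;
	pthread_mutex_unlock(&ds->lock);
}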
3596 bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA); in do_write_page()
3599 f2fs_down_read(&fio->sbi->io_order_lock); in do_write_page()
3601 f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr, in do_write_page()
3603 if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) in do_write_page()
3604 f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr); in do_write_page()
3609 f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1); in do_write_page()
3612 f2fs_up_read(&fio->sbi->io_order_lock); in do_write_page()
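do_write_page() takes io_order_lock around allocation plus submission for LFS cold data so blocks reach the log in allocation order. An assumed-shape sketch with a pthread rwlock and callback stand-ins for f2fs_allocate_data_block() and the bio submission:

#include <pthread.h>

static pthread_rwlock_t io_order_lock = PTHREAD_RWLOCK_INITIALIZER;

static void write_one_page(int keep_order,
			   unsigned int (*allocate)(void),
			   void (*submit)(unsigned int blkaddr))
{
	unsigned int blkaddr;

	if (keep_order)
		pthread_rwlock_rdlock(&io_order_lock);
	blkaddr = allocate();	/* pick the on-disk target */
	submit(blkaddr);	/* queue the write at that target */
	if (keep_order)
		pthread_rwlock_unlock(&io_order_lock);
}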
3615 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page, in f2fs_do_write_meta_page() argument
3619 .sbi = sbi, in f2fs_do_write_meta_page()
3631 if (unlikely(page->index >= MAIN_BLKADDR(sbi))) in f2fs_do_write_meta_page()
3637 stat_inc_meta_count(sbi, page->index); in f2fs_do_write_meta_page()
3638 f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE); in f2fs_do_write_meta_page()
3648 f2fs_update_iostat(fio->sbi, NULL, fio->io_type, F2FS_BLKSIZE); in f2fs_do_write_node_page()
3654 struct f2fs_sb_info *sbi = fio->sbi; in f2fs_outplace_write_data() local
3657 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR); in f2fs_outplace_write_data()
3664 f2fs_update_iostat(sbi, dn->inode, fio->io_type, F2FS_BLKSIZE); in f2fs_outplace_write_data()
3670 struct f2fs_sb_info *sbi = fio->sbi; in f2fs_inplace_write_data() local
3677 segno = GET_SEGNO(sbi, fio->new_blkaddr); in f2fs_inplace_write_data()
3679 if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) { in f2fs_inplace_write_data()
3680 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_inplace_write_data()
3681 f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.", in f2fs_inplace_write_data()
3684 f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE); in f2fs_inplace_write_data()
3688 if (f2fs_cp_error(sbi)) { in f2fs_inplace_write_data()
3694 f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1); in f2fs_inplace_write_data()
3696 stat_inc_inplace_blocks(fio->sbi); in f2fs_inplace_write_data()
3698 if (fio->bio && !IS_F2FS_IPU_NOCACHE(sbi)) in f2fs_inplace_write_data()
3703 f2fs_update_device_state(fio->sbi, fio->ino, in f2fs_inplace_write_data()
3705 f2fs_update_iostat(fio->sbi, fio->page->mapping->host, in f2fs_inplace_write_data()
3721 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi, in __f2fs_get_curseg() argument
3727 if (CURSEG_I(sbi, i)->segno == segno) in __f2fs_get_curseg()
3733 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, in f2fs_do_replace_block() argument
3738 struct sit_info *sit_i = SIT_I(sbi); in f2fs_do_replace_block()
3746 segno = GET_SEGNO(sbi, new_blkaddr); in f2fs_do_replace_block()
3747 se = get_seg_entry(sbi, segno); in f2fs_do_replace_block()
3750 f2fs_down_write(&SM_I(sbi)->curseg_lock); in f2fs_do_replace_block()
3754 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) { in f2fs_do_replace_block()
3761 if (IS_CURSEG(sbi, segno)) { in f2fs_do_replace_block()
3763 type = __f2fs_get_curseg(sbi, segno); in f2fs_do_replace_block()
3764 f2fs_bug_on(sbi, type == NO_CHECK_TYPE); in f2fs_do_replace_block()
3770 curseg = CURSEG_I(sbi, type); in f2fs_do_replace_block()
3771 f2fs_bug_on(sbi, !IS_DATASEG(curseg->seg_type)); in f2fs_do_replace_block()
3783 change_curseg(sbi, type); in f2fs_do_replace_block()
3786 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr); in f2fs_do_replace_block()
3791 update_segment_mtime(sbi, new_blkaddr, 0); in f2fs_do_replace_block()
3792 update_sit_entry(sbi, new_blkaddr, 1); in f2fs_do_replace_block()
3794 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) { in f2fs_do_replace_block()
3795 f2fs_invalidate_internal_cache(sbi, old_blkaddr); in f2fs_do_replace_block()
3797 update_segment_mtime(sbi, old_blkaddr, 0); in f2fs_do_replace_block()
3798 update_sit_entry(sbi, old_blkaddr, -1); in f2fs_do_replace_block()
3801 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); in f2fs_do_replace_block()
3802 locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr)); in f2fs_do_replace_block()
3804 locate_dirty_segment(sbi, old_cursegno); in f2fs_do_replace_block()
3809 change_curseg(sbi, type); in f2fs_do_replace_block()
3817 f2fs_up_write(&SM_I(sbi)->curseg_lock); in f2fs_do_replace_block()
3820 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, in f2fs_replace_block() argument
3829 f2fs_do_replace_block(sbi, &sum, old_addr, new_addr, in f2fs_replace_block()
3839 struct f2fs_sb_info *sbi = F2FS_P_SB(page); in f2fs_wait_on_page_writeback() local
3842 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type); in f2fs_wait_on_page_writeback()
3844 f2fs_submit_merged_ipu_write(sbi, NULL, page); in f2fs_wait_on_page_writeback()
3847 f2fs_bug_on(sbi, locked && PageWriteback(page)); in f2fs_wait_on_page_writeback()
3856 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_wait_on_block_writeback() local
3865 cpage = find_lock_page(META_MAPPING(sbi), blkaddr); in f2fs_wait_on_block_writeback()
3875 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_wait_on_block_writeback_range() local
3884 f2fs_truncate_meta_inode_pages(sbi, blkaddr, len); in f2fs_wait_on_block_writeback_range()
3887 static int read_compacted_summaries(struct f2fs_sb_info *sbi) in read_compacted_summaries() argument
3889 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in read_compacted_summaries()
3896 start = start_sum_block(sbi); in read_compacted_summaries()
3898 page = f2fs_get_meta_page(sbi, start++); in read_compacted_summaries()
3904 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); in read_compacted_summaries()
3908 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); in read_compacted_summaries()
3917 seg_i = CURSEG_I(sbi, i); in read_compacted_summaries()
3921 reset_curseg(sbi, i, 0); in read_compacted_summaries()
3926 blk_off = BLKS_PER_SEG(sbi); in read_compacted_summaries()
3941 page = f2fs_get_meta_page(sbi, start++); in read_compacted_summaries()
3952 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) in read_normal_summaries() argument
3954 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in read_normal_summaries()
3968 if (__exist_node_summaries(sbi)) in read_normal_summaries()
3969 blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type); in read_normal_summaries()
3971 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type); in read_normal_summaries()
3977 if (__exist_node_summaries(sbi)) in read_normal_summaries()
3978 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE, in read_normal_summaries()
3981 blk_addr = GET_SUM_BLOCK(sbi, segno); in read_normal_summaries()
3984 new = f2fs_get_meta_page(sbi, blk_addr); in read_normal_summaries()
3990 if (__exist_node_summaries(sbi)) { in read_normal_summaries()
3994 for (i = 0; i < BLKS_PER_SEG(sbi); i++, ns++) { in read_normal_summaries()
3999 err = f2fs_restore_node_summary(sbi, segno, sum); in read_normal_summaries()
4006 curseg = CURSEG_I(sbi, type); in read_normal_summaries()
4017 reset_curseg(sbi, type, 0); in read_normal_summaries()
4026 static int restore_curseg_summaries(struct f2fs_sb_info *sbi) in restore_curseg_summaries() argument
4028 struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal; in restore_curseg_summaries()
4029 struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal; in restore_curseg_summaries()
4033 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) { in restore_curseg_summaries()
4034 int npages = f2fs_npages_for_summary_flush(sbi, true); in restore_curseg_summaries()
4037 f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages, in restore_curseg_summaries()
4041 err = read_compacted_summaries(sbi); in restore_curseg_summaries()
4047 if (__exist_node_summaries(sbi)) in restore_curseg_summaries()
4048 f2fs_ra_meta_pages(sbi, in restore_curseg_summaries()
4049 sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type), in restore_curseg_summaries()
4053 err = read_normal_summaries(sbi, type); in restore_curseg_summaries()
4061 f2fs_err(sbi, "invalid journal entries nats %u sits %u", in restore_curseg_summaries()
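restore_curseg_summaries() restores the three data logs from compacted blocks when CP_COMPACT_SUM_FLAG is set, then reads the remaining logs through the normal path. A sketch of that control flow with stand-in constants and callbacks; the MODEL_* values mirror the CURSEG_HOT_DATA/CURSEG_HOT_NODE/NR_CURSEG_PERSIST_TYPE ordering:

#define MODEL_HOT_DATA 0
#define MODEL_HOT_NODE 3
#define MODEL_NR_PERSIST 6

static int restore_summaries(int compacted,
			     int (*read_compacted)(void),
			     int (*read_normal)(int type))
{
	int type = MODEL_HOT_DATA;
	int err;

	if (compacted) {
		err = read_compacted();
		if (err)
			return err;
		type = MODEL_HOT_NODE;	/* data logs already restored */
	}
	for (; type < MODEL_NR_PERSIST; type++) {
		err = read_normal(type);
		if (err)
			return err;
	}
	return 0;
}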
4069 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) in write_compacted_summaries() argument
4078 page = f2fs_grab_meta_page(sbi, blkaddr++); in write_compacted_summaries()
4083 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); in write_compacted_summaries()
4088 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); in write_compacted_summaries()
4094 seg_i = CURSEG_I(sbi, i); in write_compacted_summaries()
4095 for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) { in write_compacted_summaries()
4097 page = f2fs_grab_meta_page(sbi, blkaddr++); in write_compacted_summaries()
4121 static void write_normal_summaries(struct f2fs_sb_info *sbi, in write_normal_summaries() argument
4132 write_current_sum_page(sbi, i, blkaddr + (i - type)); in write_normal_summaries()
4135 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk) in f2fs_write_data_summaries() argument
4137 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) in f2fs_write_data_summaries()
4138 write_compacted_summaries(sbi, start_blk); in f2fs_write_data_summaries()
4140 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA); in f2fs_write_data_summaries()
4143 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk) in f2fs_write_node_summaries() argument
4145 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE); in f2fs_write_node_summaries()
4170 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi, in get_current_sit_page() argument
4173 return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno)); in get_current_sit_page()
4176 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi, in get_next_sit_page() argument
4179 struct sit_info *sit_i = SIT_I(sbi); in get_next_sit_page()
4183 src_off = current_sit_addr(sbi, start); in get_next_sit_page()
4184 dst_off = next_sit_addr(sbi, src_off); in get_next_sit_page()
4186 page = f2fs_grab_meta_page(sbi, dst_off); in get_next_sit_page()
4187 seg_info_to_sit_page(sbi, page, start); in get_next_sit_page()
4249 static void add_sits_in_set(struct f2fs_sb_info *sbi) in add_sits_in_set() argument
4251 struct f2fs_sm_info *sm_info = SM_I(sbi); in add_sits_in_set()
4253 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap; in add_sits_in_set()
4256 for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi)) in add_sits_in_set()
4260 static void remove_sits_in_journal(struct f2fs_sb_info *sbi) in remove_sits_in_journal() argument
4262 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); in remove_sits_in_journal()
4272 dirtied = __mark_sit_entry_dirty(sbi, segno); in remove_sits_in_journal()
4275 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set); in remove_sits_in_journal()
4285 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) in f2fs_flush_sit_entries() argument
4287 struct sit_info *sit_i = SIT_I(sbi); in f2fs_flush_sit_entries()
4289 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); in f2fs_flush_sit_entries()
4292 struct list_head *head = &SM_I(sbi)->sit_entry_set; in f2fs_flush_sit_entries()
4293 bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS); in f2fs_flush_sit_entries()
4305 add_sits_in_set(sbi); in f2fs_flush_sit_entries()
4314 remove_sits_in_journal(sbi); in f2fs_flush_sit_entries()
4326 (unsigned long)MAIN_SEGS(sbi)); in f2fs_flush_sit_entries()
4336 page = get_next_sit_page(sbi, start_segno); in f2fs_flush_sit_entries()
4344 se = get_seg_entry(sbi, segno); in f2fs_flush_sit_entries()
4348 f2fs_bug_on(sbi, 1); in f2fs_flush_sit_entries()
4354 add_discard_addrs(sbi, cpc, false); in f2fs_flush_sit_entries()
4360 f2fs_bug_on(sbi, offset < 0); in f2fs_flush_sit_entries()
4365 check_block_count(sbi, segno, in f2fs_flush_sit_entries()
4371 check_block_count(sbi, segno, in f2fs_flush_sit_entries()
4385 f2fs_bug_on(sbi, ses->entry_cnt); in f2fs_flush_sit_entries()
4389 f2fs_bug_on(sbi, !list_empty(head)); in f2fs_flush_sit_entries()
4390 f2fs_bug_on(sbi, sit_i->dirty_sentries); in f2fs_flush_sit_entries()
4396 add_discard_addrs(sbi, cpc, false); in f2fs_flush_sit_entries()
4402 set_prefree_as_free_segments(sbi); in f2fs_flush_sit_entries()
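f2fs_flush_sit_entries() prefers the in-summary SIT journal and falls back to real SIT blocks when journal space runs out or SBI_IS_RESIZEFS is set. A rough, simplified model of the per-batch destination choice, with assumed parameter names:

static int sit_flush_to_journal(unsigned int entry_cnt,
				unsigned int journal_free_slots,
				int resizing)
{
	if (resizing)	/* SBI_IS_RESIZEFS forces on-disk SIT blocks */
		return 0;
	return entry_cnt <= journal_free_slots;
}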
4405 static int build_sit_info(struct f2fs_sb_info *sbi) in build_sit_info() argument
4407 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); in build_sit_info()
4412 unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0; in build_sit_info()
4415 sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL); in build_sit_info()
4419 SM_I(sbi)->sit_info = sit_i; in build_sit_info()
4422 f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry), in build_sit_info()
4423 MAIN_SEGS(sbi)), in build_sit_info()
4428 main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); in build_sit_info()
4429 sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size, in build_sit_info()
4435 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (3 + discard_map); in build_sit_info()
4437 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map); in build_sit_info()
4439 sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL); in build_sit_info()
4445 for (start = 0; start < MAIN_SEGS(sbi); start++) { in build_sit_info()
4463 sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); in build_sit_info()
4467 if (__is_large_section(sbi)) { in build_sit_info()
4469 f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry), in build_sit_info()
4470 MAIN_SECS(sbi)), in build_sit_info()
4480 sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP); in build_sit_info()
4481 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP); in build_sit_info()
4493 sit_i->invalid_segmap = f2fs_kvzalloc(sbi, in build_sit_info()
4500 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg; in build_sit_info()
4505 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time); in build_sit_info()
4511 static int build_free_segmap(struct f2fs_sb_info *sbi) in build_free_segmap() argument
4517 free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL); in build_free_segmap()
4521 SM_I(sbi)->free_info = free_i; in build_free_segmap()
4523 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); in build_free_segmap()
4524 free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL); in build_free_segmap()
4528 sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); in build_free_segmap()
4529 free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL); in build_free_segmap()
4538 free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi)); in build_free_segmap()
4545 static int build_curseg(struct f2fs_sb_info *sbi) in build_curseg() argument
4550 array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE, in build_curseg()
4555 SM_I(sbi)->curseg_array = array; in build_curseg()
4559 array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL); in build_curseg()
4563 array[i].journal = f2fs_kzalloc(sbi, in build_curseg()
4577 return restore_curseg_summaries(sbi); in build_curseg()
4580 static int build_sit_entries(struct f2fs_sb_info *sbi) in build_sit_entries() argument
4582 struct sit_info *sit_i = SIT_I(sbi); in build_sit_entries()
4583 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); in build_sit_entries()
4587 int sit_blk_cnt = SIT_BLK_CNT(sbi); in build_sit_entries()
4594 readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS, in build_sit_entries()
4600 for (; start < end && start < MAIN_SEGS(sbi); start++) { in build_sit_entries()
4605 page = get_current_sit_page(sbi, start); in build_sit_entries()
4612 err = check_block_count(sbi, start, &sit); in build_sit_entries()
4618 f2fs_err(sbi, "Invalid segment type: %u, segno: %u", in build_sit_entries()
4620 f2fs_handle_error(sbi, in build_sit_entries()
4627 if (!f2fs_block_unit_discard(sbi)) in build_sit_entries()
4631 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) { in build_sit_entries()
4638 sbi->discard_blks += BLKS_PER_SEG(sbi) - in build_sit_entries()
4641 if (__is_large_section(sbi)) in build_sit_entries()
4642 get_sec_entry(sbi, start)->valid_blocks += in build_sit_entries()
4653 if (start >= MAIN_SEGS(sbi)) { in build_sit_entries()
4654 f2fs_err(sbi, "Wrong journal entry on segno %u", in build_sit_entries()
4657 f2fs_handle_error(sbi, ERROR_CORRUPTED_JOURNAL); in build_sit_entries()
4668 err = check_block_count(sbi, start, &sit); in build_sit_entries()
4674 f2fs_err(sbi, "Invalid segment type: %u, segno: %u", in build_sit_entries()
4677 f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE); in build_sit_entries()
4683 if (f2fs_block_unit_discard(sbi)) { in build_sit_entries()
4684 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) { in build_sit_entries()
4689 sbi->discard_blks += old_valid_blocks; in build_sit_entries()
4690 sbi->discard_blks -= se->valid_blocks; in build_sit_entries()
4694 if (__is_large_section(sbi)) { in build_sit_entries()
4695 get_sec_entry(sbi, start)->valid_blocks += in build_sit_entries()
4697 get_sec_entry(sbi, start)->valid_blocks -= in build_sit_entries()
4706 if (sit_valid_blocks[NODE] != valid_node_count(sbi)) { in build_sit_entries()
4707 f2fs_err(sbi, "SIT is corrupted node# %u vs %u", in build_sit_entries()
4708 sit_valid_blocks[NODE], valid_node_count(sbi)); in build_sit_entries()
4709 f2fs_handle_error(sbi, ERROR_INCONSISTENT_NODE_COUNT); in build_sit_entries()
4714 valid_user_blocks(sbi)) { in build_sit_entries()
4715 f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u", in build_sit_entries()
4717 valid_user_blocks(sbi)); in build_sit_entries()
4718 f2fs_handle_error(sbi, ERROR_INCONSISTENT_BLOCK_COUNT); in build_sit_entries()
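build_sit_entries() ends by cross-checking the SIT totals against the checkpoint's counters, as the two f2fs_err() paths above show. The invariant restated as a standalone check; the parameter names are stand-ins for valid_node_count() and valid_user_blocks():

static int sit_counts_consistent(unsigned int sit_node_blocks,
				 unsigned int sit_data_blocks,
				 unsigned int ckpt_node_blocks,
				 unsigned int ckpt_user_blocks)
{
	if (sit_node_blocks != ckpt_node_blocks)
		return 0;	/* "SIT is corrupted node#" path */
	if (sit_data_blocks + sit_node_blocks > ckpt_user_blocks)
		return 0;	/* "SIT is corrupted data#" path */
	return 1;
}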
4725 static void init_free_segmap(struct f2fs_sb_info *sbi) in init_free_segmap() argument
4731 for (start = 0; start < MAIN_SEGS(sbi); start++) { in init_free_segmap()
4732 if (f2fs_usable_blks_in_seg(sbi, start) == 0) in init_free_segmap()
4734 sentry = get_seg_entry(sbi, start); in init_free_segmap()
4736 __set_free(sbi, start); in init_free_segmap()
4738 SIT_I(sbi)->written_valid_blocks += in init_free_segmap()
4744 struct curseg_info *curseg_t = CURSEG_I(sbi, type); in init_free_segmap()
4746 __set_test_and_inuse(sbi, curseg_t->segno); in init_free_segmap()
4750 static void init_dirty_segmap(struct f2fs_sb_info *sbi) in init_dirty_segmap() argument
4752 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in init_dirty_segmap()
4753 struct free_segmap_info *free_i = FREE_I(sbi); in init_dirty_segmap()
4759 segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset); in init_dirty_segmap()
4760 if (segno >= MAIN_SEGS(sbi)) in init_dirty_segmap()
4763 valid_blocks = get_valid_blocks(sbi, segno, false); in init_dirty_segmap()
4764 usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno); in init_dirty_segmap()
4768 f2fs_bug_on(sbi, 1); in init_dirty_segmap()
4772 __locate_dirty_segment(sbi, segno, DIRTY); in init_dirty_segmap()
4776 if (!__is_large_section(sbi)) in init_dirty_segmap()
4780 for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) { in init_dirty_segmap()
4781 valid_blocks = get_valid_blocks(sbi, segno, true); in init_dirty_segmap()
4782 secno = GET_SEC_FROM_SEG(sbi, segno); in init_dirty_segmap()
4784 if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi)) in init_dirty_segmap()
4786 if (IS_CURSEC(sbi, secno)) in init_dirty_segmap()
4793 static int init_victim_secmap(struct f2fs_sb_info *sbi) in init_victim_secmap() argument
4795 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in init_victim_secmap()
4796 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); in init_victim_secmap()
4798 dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL); in init_victim_secmap()
4802 dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL); in init_victim_secmap()
4811 static int build_dirty_segmap(struct f2fs_sb_info *sbi) in build_dirty_segmap() argument
4817 dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info), in build_dirty_segmap()
4822 SM_I(sbi)->dirty_info = dirty_i; in build_dirty_segmap()
4825 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); in build_dirty_segmap()
4828 dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size, in build_dirty_segmap()
4834 if (__is_large_section(sbi)) { in build_dirty_segmap()
4835 bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); in build_dirty_segmap()
4836 dirty_i->dirty_secmap = f2fs_kvzalloc(sbi, in build_dirty_segmap()
4842 init_dirty_segmap(sbi); in build_dirty_segmap()
4843 return init_victim_secmap(sbi); in build_dirty_segmap()
4846 static int sanity_check_curseg(struct f2fs_sb_info *sbi) in sanity_check_curseg() argument
4855 struct curseg_info *curseg = CURSEG_I(sbi, i); in sanity_check_curseg()
4856 struct seg_entry *se = get_seg_entry(sbi, curseg->segno); in sanity_check_curseg()
4859 if (f2fs_sb_has_readonly(sbi) && in sanity_check_curseg()
4863 sanity_check_seg_type(sbi, curseg->seg_type); in sanity_check_curseg()
4866 f2fs_err(sbi, in sanity_check_curseg()
4869 f2fs_handle_error(sbi, ERROR_INVALID_CURSEG); in sanity_check_curseg()
4879 for (blkofs += 1; blkofs < BLKS_PER_SEG(sbi); blkofs++) { in sanity_check_curseg()
4883 f2fs_err(sbi, in sanity_check_curseg()
4887 f2fs_handle_error(sbi, ERROR_INVALID_CURSEG); in sanity_check_curseg()
4896 static int check_zone_write_pointer(struct f2fs_sb_info *sbi, in check_zone_write_pointer() argument
4902 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT; in check_zone_write_pointer()
4910 wp_segno = GET_SEGNO(sbi, wp_block); in check_zone_write_pointer()
4911 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno); in check_zone_write_pointer()
4913 zone_segno = GET_SEGNO(sbi, zone_block); in check_zone_write_pointer()
4914 zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno); in check_zone_write_pointer()
4916 if (zone_segno >= MAIN_SEGS(sbi)) in check_zone_write_pointer()
4924 if (zone_secno == GET_SEC_FROM_SEG(sbi, in check_zone_write_pointer()
4925 CURSEG_I(sbi, i)->segno)) in check_zone_write_pointer()
4932 for (s = sbi->segs_per_sec - 1; s >= 0; s--) { in check_zone_write_pointer()
4934 se = get_seg_entry(sbi, segno); in check_zone_write_pointer()
4935 for (b = sbi->blocks_per_seg - 1; b >= 0; b--) in check_zone_write_pointer()
4937 last_valid_block = START_BLOCK(sbi, segno) + b; in check_zone_write_pointer()
4957 f2fs_notice(sbi, in check_zone_write_pointer()
4961 ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block, in check_zone_write_pointer()
4964 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)", in check_zone_write_pointer()
4977 f2fs_notice(sbi, "Valid blocks are not aligned with write pointer: " in check_zone_write_pointer()
4979 GET_SEGNO(sbi, last_valid_block), in check_zone_write_pointer()
4980 GET_BLKOFF_FROM_SEG0(sbi, last_valid_block), in check_zone_write_pointer()
4990 f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)", in check_zone_write_pointer()
4993 f2fs_err(sbi, "Finishing zone failed: %s (errno=%d)", in check_zone_write_pointer()
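check_zone_write_pointer() distinguishes two repair cases visible in the notices above: an empty zone whose write pointer is not at the zone start gets discarded/reset, and a zone whose valid blocks extend past the write pointer gets filled and finished. A hedged classification sketch; last_valid is -1 when f2fs sees no valid block in the zone, and all addresses are block numbers on one device:

enum wp_action { WP_OK, WP_RESET_ZONE, WP_FINISH_ZONE };

static enum wp_action classify_wp(long long last_valid,
				  unsigned long long zone_start,
				  unsigned long long wp)
{
	if (last_valid < 0)	/* f2fs side: zone empty */
		return wp == zone_start ? WP_OK : WP_RESET_ZONE;
	if (wp > (unsigned long long)last_valid)
		return WP_OK;	/* wp already past the valid data */
	return WP_FINISH_ZONE;	/* advance wp over the valid blocks */
}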
5000 static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi, in get_target_zoned_dev() argument
5005 for (i = 0; i < sbi->s_ndevs; i++) { in get_target_zoned_dev()
5008 if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr && in get_target_zoned_dev()
5023 static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type) in fix_curseg_write_pointer() argument
5025 struct curseg_info *cs = CURSEG_I(sbi, type); in fix_curseg_write_pointer()
5030 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT; in fix_curseg_write_pointer()
5034 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno); in fix_curseg_write_pointer()
5035 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section)); in fix_curseg_write_pointer()
5037 zbd = get_target_zoned_dev(sbi, cs_zone_block); in fix_curseg_write_pointer()
5047 f2fs_err(sbi, "Report zone failed: %s errno=(%d)", in fix_curseg_write_pointer()
5056 wp_segno = GET_SEGNO(sbi, wp_block); in fix_curseg_write_pointer()
5057 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno); in fix_curseg_write_pointer()
5064 f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: " in fix_curseg_write_pointer()
5068 f2fs_notice(sbi, "Assign new section to curseg[%d]: " in fix_curseg_write_pointer()
5071 f2fs_allocate_new_section(sbi, type, true); in fix_curseg_write_pointer()
5074 if (check_zone_write_pointer(sbi, zbd, &zone)) in fix_curseg_write_pointer()
5078 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno); in fix_curseg_write_pointer()
5079 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section)); in fix_curseg_write_pointer()
5081 zbd = get_target_zoned_dev(sbi, cs_zone_block); in fix_curseg_write_pointer()
5090 f2fs_err(sbi, "Report zone failed: %s errno=(%d)", in fix_curseg_write_pointer()
5099 f2fs_notice(sbi, in fix_curseg_write_pointer()
5103 err = __f2fs_issue_discard_zone(sbi, zbd->bdev, cs_zone_block, in fix_curseg_write_pointer()
5106 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)", in fix_curseg_write_pointer()
5115 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi) in f2fs_fix_curseg_write_pointer() argument
5120 ret = fix_curseg_write_pointer(sbi, i); in f2fs_fix_curseg_write_pointer()
5129 struct f2fs_sb_info *sbi; member
5140 return check_zone_write_pointer(args->sbi, args->fdev, zone); in check_zone_write_pointer_cb()
5143 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi) in f2fs_check_write_pointer() argument
5148 for (i = 0; i < sbi->s_ndevs; i++) { in f2fs_check_write_pointer()
5152 args.sbi = sbi; in f2fs_check_write_pointer()
5172 struct f2fs_sb_info *sbi, unsigned int segno) in f2fs_usable_zone_blks_in_seg() argument
5177 if (!sbi->unusable_blocks_per_sec) in f2fs_usable_zone_blks_in_seg()
5178 return BLKS_PER_SEG(sbi); in f2fs_usable_zone_blks_in_seg()
5180 secno = GET_SEC_FROM_SEG(sbi, segno); in f2fs_usable_zone_blks_in_seg()
5181 seg_start = START_BLOCK(sbi, segno); in f2fs_usable_zone_blks_in_seg()
5182 sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno)); in f2fs_usable_zone_blks_in_seg()
5183 sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi); in f2fs_usable_zone_blks_in_seg()
5193 if (seg_start + BLKS_PER_SEG(sbi) > sec_cap_blkaddr) in f2fs_usable_zone_blks_in_seg()
5196 return BLKS_PER_SEG(sbi); in f2fs_usable_zone_blks_in_seg()
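f2fs_usable_zone_blks_in_seg() clamps a segment against the section's capacity boundary, yielding zero, a partial count, or a full segment. The three-case arithmetic as a standalone helper with assumed parameter names:

static unsigned int usable_blks(unsigned long long seg_start,
				unsigned long long sec_cap_end,
				unsigned int blks_per_seg)
{
	if (seg_start >= sec_cap_end)
		return 0;				/* wholly past capacity */
	if (seg_start + blks_per_seg > sec_cap_end)
		return (unsigned int)(sec_cap_end - seg_start); /* partial */
	return blks_per_seg;				/* fully usable */
}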
5199 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi) in f2fs_fix_curseg_write_pointer() argument
5204 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi) in f2fs_check_write_pointer() argument
5209 static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi, in f2fs_usable_zone_blks_in_seg() argument
5216 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi, in f2fs_usable_blks_in_seg() argument
5219 if (f2fs_sb_has_blkzoned(sbi)) in f2fs_usable_blks_in_seg()
5220 return f2fs_usable_zone_blks_in_seg(sbi, segno); in f2fs_usable_blks_in_seg()
5222 return BLKS_PER_SEG(sbi); in f2fs_usable_blks_in_seg()
5225 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi, in f2fs_usable_segs_in_sec() argument
5228 if (f2fs_sb_has_blkzoned(sbi)) in f2fs_usable_segs_in_sec()
5229 return CAP_SEGS_PER_SEC(sbi); in f2fs_usable_segs_in_sec()
5231 return SEGS_PER_SEC(sbi); in f2fs_usable_segs_in_sec()
5237 static void init_min_max_mtime(struct f2fs_sb_info *sbi) in init_min_max_mtime() argument
5239 struct sit_info *sit_i = SIT_I(sbi); in init_min_max_mtime()
5246 for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) { in init_min_max_mtime()
5250 for (i = 0; i < SEGS_PER_SEC(sbi); i++) in init_min_max_mtime()
5251 mtime += get_seg_entry(sbi, segno + i)->mtime; in init_min_max_mtime()
5253 mtime = div_u64(mtime, SEGS_PER_SEC(sbi)); in init_min_max_mtime()
5258 sit_i->max_mtime = get_mtime(sbi, false); in init_min_max_mtime()
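init_min_max_mtime() averages per-segment mtimes over each section (div_u64() in the kernel) and keeps the minimum as sit_i->min_mtime. The same reduction in plain C, illustrative only:

static unsigned long long min_section_mtime(const unsigned long long *seg_mtime,
					    unsigned int nsegs,
					    unsigned int segs_per_sec)
{
	unsigned long long min = ~0ULL;
	unsigned int s, i;

	for (s = 0; s + segs_per_sec <= nsegs; s += segs_per_sec) {
		unsigned long long sum = 0;

		for (i = 0; i < segs_per_sec; i++)
			sum += seg_mtime[s + i];
		sum /= segs_per_sec;	/* per-section average */
		if (sum < min)
			min = sum;
	}
	return min;
}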
5263 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi) in f2fs_build_segment_manager() argument
5265 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); in f2fs_build_segment_manager()
5266 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in f2fs_build_segment_manager()
5270 sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL); in f2fs_build_segment_manager()
5275 sbi->sm_info = sm_info; in f2fs_build_segment_manager()
5288 if (!f2fs_lfs_mode(sbi)) in f2fs_build_segment_manager()
5292 sm_info->min_seq_blocks = BLKS_PER_SEG(sbi); in f2fs_build_segment_manager()
5294 sm_info->min_ssr_sections = reserved_sections(sbi); in f2fs_build_segment_manager()
5300 err = f2fs_create_flush_cmd_control(sbi); in f2fs_build_segment_manager()
5304 err = create_discard_cmd_control(sbi); in f2fs_build_segment_manager()
5308 err = build_sit_info(sbi); in f2fs_build_segment_manager()
5311 err = build_free_segmap(sbi); in f2fs_build_segment_manager()
5314 err = build_curseg(sbi); in f2fs_build_segment_manager()
5319 err = build_sit_entries(sbi); in f2fs_build_segment_manager()
5323 init_free_segmap(sbi); in f2fs_build_segment_manager()
5324 err = build_dirty_segmap(sbi); in f2fs_build_segment_manager()
5328 err = sanity_check_curseg(sbi); in f2fs_build_segment_manager()
5332 init_min_max_mtime(sbi); in f2fs_build_segment_manager()
5336 static void discard_dirty_segmap(struct f2fs_sb_info *sbi, in discard_dirty_segmap() argument
5339 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in discard_dirty_segmap()
5347 static void destroy_victim_secmap(struct f2fs_sb_info *sbi) in destroy_victim_secmap() argument
5349 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in destroy_victim_secmap()
5355 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi) in destroy_dirty_segmap() argument
5357 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in destroy_dirty_segmap()
5365 discard_dirty_segmap(sbi, i); in destroy_dirty_segmap()
5367 if (__is_large_section(sbi)) { in destroy_dirty_segmap()
5373 destroy_victim_secmap(sbi); in destroy_dirty_segmap()
5374 SM_I(sbi)->dirty_info = NULL; in destroy_dirty_segmap()
5378 static void destroy_curseg(struct f2fs_sb_info *sbi) in destroy_curseg() argument
5380 struct curseg_info *array = SM_I(sbi)->curseg_array; in destroy_curseg()
5385 SM_I(sbi)->curseg_array = NULL; in destroy_curseg()
5393 static void destroy_free_segmap(struct f2fs_sb_info *sbi) in destroy_free_segmap() argument
5395 struct free_segmap_info *free_i = SM_I(sbi)->free_info; in destroy_free_segmap()
5399 SM_I(sbi)->free_info = NULL; in destroy_free_segmap()
5405 static void destroy_sit_info(struct f2fs_sb_info *sbi) in destroy_sit_info() argument
5407 struct sit_info *sit_i = SIT_I(sbi); in destroy_sit_info()
5420 SM_I(sbi)->sit_info = NULL; in destroy_sit_info()
5429 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi) in f2fs_destroy_segment_manager() argument
5431 struct f2fs_sm_info *sm_info = SM_I(sbi); in f2fs_destroy_segment_manager()
5435 f2fs_destroy_flush_cmd_control(sbi, true); in f2fs_destroy_segment_manager()
5436 destroy_discard_cmd_control(sbi); in f2fs_destroy_segment_manager()
5437 destroy_dirty_segmap(sbi); in f2fs_destroy_segment_manager()
5438 destroy_curseg(sbi); in f2fs_destroy_segment_manager()
5439 destroy_free_segmap(sbi); in f2fs_destroy_segment_manager()
5440 destroy_sit_info(sbi); in f2fs_destroy_segment_manager()
5441 sbi->sm_info = NULL; in f2fs_destroy_segment_manager()