fs/f2fs/checkpoint.c: diff between 0337966d121ebebf73a1c346123e8112796e684e (old, removed lines marked '-') and c473f1a9658b6c23d576136d5a49b1c731ef1767 (new, added lines marked '+')
/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as

--- 261 unchanged lines hidden ---

        struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
        long diff, written;

        /* collect a number of dirty meta pages and write together */
        if (wbc->for_kupdate ||
                get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
                goto skip_write;

-       trace_f2fs_writepages(mapping->host, wbc, META);
-
-       /* if mounting is failed, skip writing node pages */
-       mutex_lock(&sbi->cp_mutex);
+       /* if locked failed, cp will flush dirty pages instead */
+       if (!mutex_trylock(&sbi->cp_mutex))
+               goto skip_write;
+
+       trace_f2fs_writepages(mapping->host, wbc, META);
        diff = nr_pages_to_write(sbi, META, wbc);
        written = sync_meta_pages(sbi, META, wbc->nr_to_write);
        mutex_unlock(&sbi->cp_mutex);
        wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
        return 0;

skip_write:
        wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);

--- 272 unchanged lines hidden ---

        iput(inode);

        get_node_info(sbi, ino, &ni);

        /* ENOMEM was fully retried in f2fs_evict_inode. */
        if (ni.blk_addr != NULL_ADDR) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_msg(sbi->sb, KERN_WARNING,
570 "%s: orphan failed (ino=%x), run fsck to fix.",
571 "%s: orphan failed (ino=%x) by kernel, retry mount.",
                                __func__, ino);
                return -EIO;
        }
        __remove_ino_entry(sbi, ino, ORPHAN_INO);
        return 0;
}

int recover_orphan_inodes(struct f2fs_sb_info *sbi)

--- 93 unchanged lines hidden ---

        unsigned long blk_size = sbi->blocksize;
        size_t crc_offset = 0;
        __u32 crc = 0;

        *cp_page = get_meta_page(sbi, cp_addr);
        *cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);

        crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
-       if (crc_offset >= blk_size) {
+       if (crc_offset > (blk_size - sizeof(__le32))) {
                f2fs_msg(sbi->sb, KERN_WARNING,
                        "invalid crc_offset: %zu", crc_offset);
                return -EINVAL;
        }

        crc = cur_cp_crc(*cp_block);
        if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
                f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");

--- 122 unchanged lines hidden ---

{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

        if (is_inode_flag_set(inode, flag))
                return;

        set_inode_flag(inode, flag);
-       list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]);
+       if (!f2fs_is_volatile_file(inode))
+               list_add_tail(&F2FS_I(inode)->dirty_list,
+                                               &sbi->inode_list[type]);
        stat_inc_dirty_inode(sbi, type);
}

static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
{
        int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

        if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))

--- 108 unchanged lines hidden ---

                        if (is_inode_flag_set(inode, FI_DIRTY_INODE))
                                update_inode_page(inode);
                        iput(inode);
                }
        };
        return 0;
}

+static void __prepare_cp_block(struct f2fs_sb_info *sbi)
+{
+       struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
+       nid_t last_nid = nm_i->next_scan_nid;
+
+       next_free_nid(sbi, &last_nid);
+       ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
+       ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
+       ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
+       ckpt->next_free_nid = cpu_to_le32(last_nid);
+}
+
/*
 * Freeze all the FS-operations for checkpoint.
 */
static int block_operations(struct f2fs_sb_info *sbi)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,

--- 7 unchanged lines hidden ---

retry_flush_dents:
        f2fs_lock_all(sbi);
        /* write all the dirty dentry pages */
        if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
                f2fs_unlock_all(sbi);
                err = sync_dirty_inodes(sbi, DIR_INODE);
                if (err)
                        goto out;
+               cond_resched();
                goto retry_flush_dents;
        }

+       /*
+        * POR: we should ensure that there are no dirty node pages
+        * until finishing nat/sit flush. inode->i_blocks can be updated.
+        */
+       down_write(&sbi->node_change);
+
        if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
+               up_write(&sbi->node_change);
                f2fs_unlock_all(sbi);
                err = f2fs_sync_inode_meta(sbi);
                if (err)
                        goto out;
+               cond_resched();
                goto retry_flush_dents;
        }

-       /*
-        * POR: we should ensure that there are no dirty node pages
-        * until finishing nat/sit flush.
-        */
retry_flush_nodes:
        down_write(&sbi->node_write);

        if (get_pages(sbi, F2FS_DIRTY_NODES)) {
                up_write(&sbi->node_write);
                err = sync_node_pages(sbi, &wbc);
                if (err) {
+                       up_write(&sbi->node_change);
                        f2fs_unlock_all(sbi);
                        goto out;
                }
+               cond_resched();
                goto retry_flush_nodes;
        }
+
+       /*
+        * sbi->node_change is used only for AIO write_begin path which produces
+        * dirty node blocks and some checkpoint values by block allocation.
+        */
+       __prepare_cp_block(sbi);
+       up_write(&sbi->node_change);
out:
        blk_finish_plug(&plug);
        return err;
}

static void unblock_operations(struct f2fs_sb_info *sbi)
{
        up_write(&sbi->node_write);

--- 17 unchanged lines hidden ---

static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
        unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

        spin_lock(&sbi->cp_lock);

-       if (cpc->reason == CP_UMOUNT && ckpt->cp_pack_total_block_count >
+       if ((cpc->reason & CP_UMOUNT) &&
+                       le32_to_cpu(ckpt->cp_pack_total_block_count) >
                        sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks)
                disable_nat_bits(sbi, false);

-       if (cpc->reason == CP_UMOUNT)
+       if (cpc->reason & CP_UMOUNT)
                __set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
        else
                __clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);

-       if (cpc->reason == CP_FASTBOOT)
+       if (cpc->reason & CP_FASTBOOT)
                __set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
        else
                __clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);

        if (orphan_num)
                __set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
        else
                __clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

--- 7 unchanged lines hidden ---

        spin_unlock(&sbi->cp_lock);
}

static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
-       nid_t last_nid = nm_i->next_scan_nid;
        block_t start_blk;
        unsigned int data_sum_blocks, orphan_blocks;
        __u32 crc32 = 0;
        int i;
        int cp_payload_blks = __cp_payload(sbi);
        struct super_block *sb = sbi->sb;
        struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
        u64 kbytes_written;

        /* Flush all the NAT/SIT pages */
        while (get_pages(sbi, F2FS_DIRTY_META)) {
                sync_meta_pages(sbi, META, LONG_MAX);
                if (unlikely(f2fs_cp_error(sbi)))
                        return -EIO;
        }

-       next_free_nid(sbi, &last_nid);
-
        /*
         * modify checkpoint
         * version number is already updated
         */
        ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
-       ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
        ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
        for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
                ckpt->cur_node_segno[i] =
                        cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
                ckpt->cur_node_blkoff[i] =
                        cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
                ckpt->alloc_type[i + CURSEG_HOT_NODE] =
                                curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
        }
        for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
                ckpt->cur_data_segno[i] =
                        cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
                ckpt->cur_data_blkoff[i] =
                        cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
                ckpt->alloc_type[i + CURSEG_HOT_DATA] =
                                curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
        }

-       ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
-       ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
-       ckpt->next_free_nid = cpu_to_le32(last_nid);
-
        /* 2 cp + n data seg summary + orphan inode blocks */
        data_sum_blocks = npages_for_summary_flush(sbi, false);
        spin_lock(&sbi->cp_lock);
        if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
                __set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
        else
                __clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
        spin_unlock(&sbi->cp_lock);

--- 23 unchanged lines hidden ---

                                le32_to_cpu(ckpt->checksum_offset)))
                                = cpu_to_le32(crc32);

        start_blk = __start_cp_next_addr(sbi);

        /* write nat bits */
        if (enabled_nat_bits(sbi, cpc)) {
                __u64 cp_ver = cur_cp_version(ckpt);
-               unsigned int i;
                block_t blk;

                cp_ver |= ((__u64)crc32 << 32);
                *(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver);

                blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks;
                for (i = 0; i < nm_i->nat_bits_blocks; i++)
                        update_meta_page(sbi, nm_i->nat_bits +

--- 90 unchanged lines hidden ---

{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        unsigned long long ckpt_ver;
        int err = 0;

        mutex_lock(&sbi->cp_mutex);

        if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
-               (cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC ||
-               (cpc->reason == CP_DISCARD && !sbi->discard_blks)))
+               ((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
+               ((cpc->reason & CP_DISCARD) && !sbi->discard_blks)))
                goto out;
        if (unlikely(f2fs_cp_error(sbi))) {
                err = -EIO;
                goto out;
        }
        if (f2fs_readonly(sbi->sb)) {
                err = -EROFS;
                goto out;

--- 5 unchanged lines hidden ---

        if (err)
                goto out;

        trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");

        f2fs_flush_merged_bios(sbi);

        /* this is the case of multiple fstrims without any changes */
-       if (cpc->reason == CP_DISCARD) {
+       if (cpc->reason & CP_DISCARD) {
                if (!exist_trim_candidates(sbi, cpc)) {
                        unblock_operations(sbi);
                        goto out;
                }

                if (NM_I(sbi)->dirty_nat_cnt == 0 &&
                                SIT_I(sbi)->dirty_sentries == 0 &&
                                prefree_segments(sbi) == 0) {

--- 21 unchanged lines hidden ---

        if (err)
                release_discard_addrs(sbi);
        else
                clear_prefree_segments(sbi, cpc);

        unblock_operations(sbi);
        stat_inc_cp_count(sbi->stat_info);

-       if (cpc->reason == CP_RECOVERY)
+       if (cpc->reason & CP_RECOVERY)
                f2fs_msg(sbi->sb, KERN_NOTICE,
                        "checkpoint: version = %llx", ckpt_ver);

        /* do checkpoint periodically */
        f2fs_update_time(sbi, CP_TIME);
        trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
        mutex_unlock(&sbi->cp_mutex);

--- 41 unchanged lines hidden ---