fs/f2fs/checkpoint.c: diff from 8dd06ef34b6e2f41b29fbf5fc1663780f2524285 (old) to 0b6d4ca04a86b9dababbb76e58d33c437e127b77 (new). Lines prefixed "-" exist only in the old revision, lines prefixed "+" only in the new one; unprefixed lines are unchanged context.
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>

--- 36 unchanged lines hidden ---

		goto repeat;
	}
	f2fs_wait_on_page_writeback(page, META, true, true);
	if (!PageUptodate(page))
		SetPageUptodate(page);
	return page;
}

-/*
- * We guarantee no failure on the returned page.
- */
static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
							bool is_meta)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,

--- 20 unchanged lines hidden ---

	fio.page = page;

	err = f2fs_submit_page_bio(&fio);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}

+	f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE);
+
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);

--- 101 unchanged lines hidden ---

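Note on the hunk above: the removed comment promised that meta page lookup could not fail, yet the function returns ERR_PTR(err) when the synchronous bio submit fails, and the new f2fs_update_iostat() call accounts FS_META_READ_IO only for reads that were actually issued. A caller-side sketch, assuming the public f2fs_get_meta_page() wrapper; the helper name example_read_meta is hypothetical:

static int example_read_meta(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct page *page = f2fs_get_meta_page(sbi, index);

	/* may be ERR_PTR(-EIO) propagated from f2fs_submit_page_bio() */
	if (IS_ERR(page))
		return PTR_ERR(page);
	/* ... consume the meta block ... */
	f2fs_put_page(page, 1);	/* unlock and drop the reference */
	return 0;
}
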
	default:
		BUG();
	}

	return true;
}

/*
- * Readahead CP/NAT/SIT/SSA pages
+ * Readahead CP/NAT/SIT/SSA/POR pages
 */
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
							int type, bool sync)
{
	struct page *page;
	block_t blkno = start;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
		.encrypted_page = NULL,
		.in_list = false,
		.is_por = (type == META_POR),
	};
	struct blk_plug plug;
+	int err;

	if (unlikely(type == META_POR))
		fio.op_flags &= ~REQ_META;

	blk_start_plug(&plug);
	for (; nrpages-- > 0; blkno++) {

		if (!f2fs_is_valid_blkaddr(sbi, blkno, type))

--- 27 unchanged lines hidden ---

		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}

		fio.page = page;
-		f2fs_submit_page_bio(&fio);
-		f2fs_put_page(page, 0);
+		err = f2fs_submit_page_bio(&fio);
+		f2fs_put_page(page, err ? 1 : 0);
+
+		if (!err)
+			f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE);
	}
out:
	blk_finish_plug(&plug);
	return blkno - start;
}

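Note: f2fs_ra_meta_pages() previously ignored the submit result and always dropped the page reference without unlocking. The new code unlocks on failure (a failed submit never reaches the end_io that would unlock the page) and counts FS_META_READ_IO only on success. A sketch of one loop iteration, factored into a hypothetical helper for clarity:

static void ra_one_meta_page(struct f2fs_sb_info *sbi,
				struct f2fs_io_info *fio, struct page *page)
{
	int err;

	fio->page = page;
	err = f2fs_submit_page_bio(fio);
	/* on error nobody else will unlock the page, so pass unlock = 1 */
	f2fs_put_page(page, err ? 1 : 0);

	if (!err)
		f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE);
}
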
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
{

--- 608 unchanged lines hidden ---

	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0;
	unsigned long long cp_start_blk_no;
	unsigned int cp_blks = 1 + __cp_payload(sbi);
	block_t cp_blk_no;
	int i;
	int err;

-	sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks),
-				 GFP_KERNEL);
+	sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks),
+				  GFP_KERNEL);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding out valid cp block involves read both
-	 * sets( cp pack1 and cp pack 2)
+	 * sets( cp pack 1 and cp pack 2)
	 */
	cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += ((unsigned long long)1) <<
				le32_to_cpu(fsb->log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

--- 248 unchanged lines hidden ---

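Note: the checkpoint buffer spans 1 + __cp_payload(sbi) blocks, so on superblocks with a large cp payload the allocation can be far beyond one page. Switching f2fs_kzalloc() to f2fs_kvzalloc() keeps the kmalloc fast path but falls back to vmalloc under memory fragmentation instead of failing the mount. The resulting call, restated:

/* try kmalloc first; fall back to vmalloc for a large, long-lived buffer */
sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks), GFP_KERNEL);
if (!sbi->ckpt)
	return -ENOMEM;
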
 */
static int block_operations(struct f2fs_sb_info *sbi)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
-	struct blk_plug plug;
	int err = 0, cnt = 0;

-	blk_start_plug(&plug);
+	/*
+	 * Let's flush inline_data in dirty node pages.
+	 */
+	f2fs_flush_inline_data(sbi);

retry_flush_quotas:
	f2fs_lock_all(sbi);
	if (__need_flush_quota(sbi)) {
		int locked;

		if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) {
			set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);

--- 12 unchanged lines hidden ---

	}

retry_flush_dents:
	/* write all the dirty dentry pages */
	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
		f2fs_unlock_all(sbi);
		err = f2fs_sync_dirty_inodes(sbi, DIR_INODE);
		if (err)
-			goto out;
+			return err;
		cond_resched();
		goto retry_flush_quotas;
	}

	/*
	 * POR: we should ensure that there are no dirty node pages
	 * until finishing nat/sit flush. inode->i_blocks can be updated.
	 */
	down_write(&sbi->node_change);

	if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
		up_write(&sbi->node_change);
		f2fs_unlock_all(sbi);
		err = f2fs_sync_inode_meta(sbi);
		if (err)
-			goto out;
+			return err;
		cond_resched();
		goto retry_flush_quotas;
	}

retry_flush_nodes:
	down_write(&sbi->node_write);

	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
		up_write(&sbi->node_write);
		atomic_inc(&sbi->wb_sync_req[NODE]);
		err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
		atomic_dec(&sbi->wb_sync_req[NODE]);
		if (err) {
			up_write(&sbi->node_change);
			f2fs_unlock_all(sbi);
-			goto out;
+			return err;
		}
		cond_resched();
		goto retry_flush_nodes;
	}

	/*
	 * sbi->node_change is used only for AIO write_begin path which produces
	 * dirty node blocks and some checkpoint values by block allocation.
	 */
	__prepare_cp_block(sbi);
	up_write(&sbi->node_change);
-out:
-	blk_finish_plug(&plug);
	return err;
}

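Note: block_operations() no longer wraps its flushing in a blk_plug, so every failure path can return directly instead of unwinding through the old out: label, and inline data in dirty node pages is flushed once, up front, before f2fs_lock_all() is taken. A simplified sketch of the resulting shape, cut down to the dentry stage only; the function name is hypothetical:

static int block_operations_sketch(struct f2fs_sb_info *sbi)
{
	int err;

	/* flush inline_data before taking the cp lock */
	f2fs_flush_inline_data(sbi);
retry:
	f2fs_lock_all(sbi);
	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
		f2fs_unlock_all(sbi);
		err = f2fs_sync_dirty_inodes(sbi, DIR_INODE);
		if (err)
			return err;	/* no plug to finish, no goto out */
		cond_resched();
		goto retry;
	}
	return 0;	/* later stages run with the fs still locked */
}
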
static void unblock_operations(struct f2fs_sb_info *sbi)
{
	up_write(&sbi->node_write);
	f2fs_unlock_all(sbi);
}

-void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
+void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);

-		if (!get_pages(sbi, F2FS_WB_CP_DATA))
+		if (!get_pages(sbi, type))
			break;

		if (unlikely(f2fs_cp_error(sbi)))
			break;

-		io_schedule_timeout(5*HZ);
+		if (type == F2FS_DIRTY_META)
+			f2fs_sync_meta_pages(sbi, META, LONG_MAX,
+							FS_CP_META_IO);
+		io_schedule_timeout(DEFAULT_IO_TIMEOUT);
	}
	finish_wait(&sbi->cp_wait, &wait);
}

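Note: the writeback-only waiter becomes f2fs_wait_on_all_pages(sbi, type), which can drain any per-sbi page counter. The hard-coded 5*HZ poll is replaced by DEFAULT_IO_TIMEOUT, and while draining F2FS_DIRTY_META the loop keeps issuing meta writeback so the counter can actually reach zero. The two call patterns used later in this diff:

f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META);	/* all dirty meta submitted */
f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);	/* in-flight CP writes done */
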
static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

--- 21 unchanged lines hidden ---

	else
		__clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);

	if (orphan_num)
		__set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

-	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
-	    is_sbi_flag_set(sbi, SBI_IS_RESIZEFS))
+	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
		__set_ckpt_flags(ckpt, CP_FSCK_FLAG);

+	if (is_sbi_flag_set(sbi, SBI_IS_RESIZEFS))
+		__set_ckpt_flags(ckpt, CP_RESIZEFS_FLAG);
+	else
+		__clear_ckpt_flags(ckpt, CP_RESIZEFS_FLAG);
+
	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
		__set_ckpt_flags(ckpt, CP_DISABLED_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_DISABLED_FLAG);

	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK))
		__set_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG);
	else

--- 63 unchanged lines hidden ---

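Note: a resize in progress no longer piggybacks on CP_FSCK_FLAG; it gets a dedicated CP_RESIZEFS_FLAG that is set or cleared on every checkpoint. A hypothetical consumer sketch, assuming the existing is_set_ckpt_flags() accessor:

/* hypothetical probe: did the last checkpoint land mid-resize? */
static bool cp_taken_during_resize(struct f2fs_sb_info *sbi)
{
	return is_set_ckpt_flags(sbi, CP_RESIZEFS_FLAG);
}
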
	int cp_payload_blks = __cp_payload(sbi);
	struct super_block *sb = sbi->sb;
	struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	u64 kbytes_written;
	int err;

	/* Flush all the NAT/SIT pages */
	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
-	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
-					!f2fs_cp_error(sbi));

-	/*
-	 * modify checkpoint
-	 * version number is already updated
-	 */
+	/* start to update checkpoint, cp ver is already updated previously */
	ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true));
	ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		ckpt->cur_node_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
		ckpt->cur_node_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
		ckpt->alloc_type[i + CURSEG_HOT_NODE] =

--- 86 unchanged lines hidden ---

	}

	/* update user_block_counts */
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	percpu_counter_set(&sbi->alloc_valid_block_count, 0);

	/* Here, we have one bio having CP pack except cp pack 2 page */
	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
-	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
-					!f2fs_cp_error(sbi));
+	/* Wait for all dirty meta pages to be submitted for IO */
+	f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META);

	/* wait for previous submitted meta pages writeback */
-	f2fs_wait_on_all_pages_writeback(sbi);
+	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	/* flush all device cache */
	err = f2fs_flush_device_cache(sbi);
	if (err)
		return err;

	/* barrier and flush checkpoint cp pack 2 page if it can */
	commit_checkpoint(sbi, ckpt, start_blk);
-	f2fs_wait_on_all_pages_writeback(sbi);
+	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	/*
	 * invalidate intermediate page cache borrowed from meta inode which are
	 * used for migration of encrypted or verity inode's blocks.
	 */
	if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi))
		invalidate_mapping_pages(META_MAPPING(sbi),
				MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);

--- 20 unchanged lines hidden ---

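Note: replacing the two f2fs_bug_on() assertions with explicit waits turns "dirty meta must be clean here" from a debug-only check into an enforced ordering. Condensed from do_checkpoint() above, the commit sequence now is:

f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); /* issue CP pack 1 */
f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META);	/* all dirty meta submitted */
f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);	/* prior writeback finished */
err = f2fs_flush_device_cache(sbi);		/* media-level cache flush */
if (err)
	return err;
commit_checkpoint(sbi, ckpt, start_blk);	/* barrier + CP pack 2 page */
f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);	/* commit record durable */
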

1538 get_pages(sbi, F2FS_DIRTY_IMETA))
1539 set_sbi_flag(sbi, SBI_IS_DIRTY);
1540
1541 f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));
1542
1543 return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
1544}
1545
-/*
- * We guarantee that this checkpoint procedure will not fail.
- */
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_ver;
	int err = 0;

	if (f2fs_readonly(sbi->sb) || f2fs_hw_is_readonly(sbi))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (cpc->reason != CP_PAUSE)
			return 0;
		f2fs_warn(sbi, "Start checkpoint disabled!");
	}
-	mutex_lock(&sbi->cp_mutex);
+	if (cpc->reason != CP_RESIZE)
+		mutex_lock(&sbi->cp_mutex);

	if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
		((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
		((cpc->reason & CP_DISCARD) && !sbi->discard_blks)))
		goto out;
	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto out;

--- 36 unchanged lines hidden ---


	/* write cached NAT/SIT entries to NAT/SIT area */
	err = f2fs_flush_nat_entries(sbi, cpc);
	if (err)
		goto stop;

	f2fs_flush_sit_entries(sbi, cpc);

-	/* unlock all the fs_lock[] in do_checkpoint() */
	err = do_checkpoint(sbi, cpc);
	if (err)
		f2fs_release_discard_addrs(sbi);
	else
		f2fs_clear_prefree_segments(sbi, cpc);
stop:
	unblock_operations(sbi);
	stat_inc_cp_count(sbi->stat_info);

	if (cpc->reason & CP_RECOVERY)
		f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);

-	/* do checkpoint periodically */
+	/* update CP_TIME to trigger checkpoint periodically */
	f2fs_update_time(sbi, CP_TIME);
	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
-	mutex_unlock(&sbi->cp_mutex);
+	if (cpc->reason != CP_RESIZE)
+		mutex_unlock(&sbi->cp_mutex);
	return err;
}

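Note: the new CP_RESIZE reason lets a caller that already holds cp_mutex run a checkpoint without deadlocking on a recursive lock. A sketch of the assumed resize-side caller; the function body around the checkpoint is hypothetical:

static int resize_with_checkpoint(struct f2fs_sb_info *sbi)
{
	struct cp_control cpc = { .reason = CP_RESIZE };
	int err;

	mutex_lock(&sbi->cp_mutex);
	/* ... relocate blocks and rewrite layout metadata ... */
	err = f2fs_write_checkpoint(sbi, &cpc);	/* skips mutex_lock internally */
	mutex_unlock(&sbi->cp_mutex);
	return err;
}
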
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < MAX_INO_ENTRY; i++) {

--- 33 unchanged lines hidden ---