--- a/fs/f2fs/checkpoint.c	(876f7a438e4247a948268ad77b67c494f709cc30)
+++ b/fs/f2fs/checkpoint.c	(430f163b01888dc26696365d9c1053ba9d6c7d92)
@@ -1,8 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * fs/f2fs/checkpoint.c
  *
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  */
 #include <linux/fs.h>

--- 268 unchanged lines hidden ---

@@ -277,28 +277,32 @@
 		if (!err)
 			f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE);
 	}
 out:
 	blk_finish_plug(&plug);
 	return blkno - start;
 }

-void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
+void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index,
+							unsigned int ra_blocks)
 {
 	struct page *page;
 	bool readahead = false;

+	if (ra_blocks == RECOVERY_MIN_RA_BLOCKS)
+		return;
+
 	page = find_get_page(META_MAPPING(sbi), index);
 	if (!page || !PageUptodate(page))
 		readahead = true;
 	f2fs_put_page(page, 0);

 	if (readahead)
-		f2fs_ra_meta_pages(sbi, index, BIO_MAX_VECS, META_POR, true);
+		f2fs_ra_meta_pages(sbi, index, ra_blocks, META_POR, true);
 }

 static int __f2fs_write_meta_page(struct page *page,
 				struct writeback_control *wbc,
 				enum iostat_type io_type)
 {
 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);

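The hunk above lets the caller size the readahead window: f2fs_ra_meta_pages_cond() now takes an explicit ra_blocks count instead of always reading ahead BIO_MAX_VECS meta pages, and it bails out entirely once the window has been cut down to RECOVERY_MIN_RA_BLOCKS. The sketch below shows how a recovery-style caller could adapt that window while walking a chain of fsync'ed node blocks; the RECOVERY_MIN/MAX_RA_BLOCKS constants are referenced by the new code, but the helper name and the exact halving/doubling policy are illustrative assumptions, not the actual fs/f2fs/recovery.c logic.

/*
 * Illustrative helper (name and policy are assumptions): widen the
 * readahead window while the node chain stays contiguous, shrink it when
 * the chain scatters.  Once it bottoms out at RECOVERY_MIN_RA_BLOCKS,
 * f2fs_ra_meta_pages_cond() above becomes a no-op.
 */
static unsigned int por_next_ra_blocks(unsigned int ra_blocks,
					block_t blkaddr, block_t next_blkaddr)
{
	if (next_blkaddr == blkaddr + 1)
		return min_t(unsigned int, RECOVERY_MAX_RA_BLOCKS,
						ra_blocks << 1);
	return max_t(unsigned int, RECOVERY_MIN_RA_BLOCKS, ra_blocks >> 1);
}

Each step of the walk would then call f2fs_ra_meta_pages_cond(sbi, blkaddr, ra_blocks) before reading the node page, and feed the next block address in the chain into a helper like the one above.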
--- 41 unchanged lines hidden ---

@@ -346,23 +350,23 @@

 	/* collect a number of dirty meta pages and write together */
 	if (wbc->sync_mode != WB_SYNC_ALL &&
 			get_pages(sbi, F2FS_DIRTY_META) <
 					nr_pages_to_skip(sbi, META))
 		goto skip_write;

 	/* if locked failed, cp will flush dirty pages instead */
-	if (!down_write_trylock(&sbi->cp_global_sem))
+	if (!f2fs_down_write_trylock(&sbi->cp_global_sem))
 		goto skip_write;

 	trace_f2fs_writepages(mapping->host, wbc, META);
 	diff = nr_pages_to_write(sbi, META, wbc);
 	written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
-	up_write(&sbi->cp_global_sem);
+	f2fs_up_write(&sbi->cp_global_sem);
 	wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
 	return 0;

 skip_write:
 	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
 	trace_f2fs_writepages(mapping->host, wbc, META);
 	return 0;
 }
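From here on every hunk is the same mechanical substitution: bare down_write()/up_write()/down_write_trylock() calls on the sbi semaphores (cp_global_sem, quota_sem, node_change, node_write, gc_lock) become f2fs_down_write()/f2fs_up_write()/f2fs_down_write_trylock(). The wrapper definitions live outside this file; a minimal sketch of their likely shape, assuming they simply forward to the kernel rwsem API around a struct f2fs_rwsem (the real f2fs.h versions may add their own waiter handling), is:

/* Sketch only - not the actual f2fs.h definitions. */
struct f2fs_rwsem {
	struct rw_semaphore internal_rwsem;
};

static inline void f2fs_down_write(struct f2fs_rwsem *sem)
{
	down_write(&sem->internal_rwsem);
}

static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
{
	return down_write_trylock(&sem->internal_rwsem);
}

static inline void f2fs_up_write(struct f2fs_rwsem *sem)
{
	up_write(&sem->internal_rwsem);
}

With wrappers of this shape the conversion changes no behaviour by itself; it funnels every lock operation through one place, so reader/writer fairness can later be tuned in a single header instead of at every call site.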
--- 785 unchanged lines hidden ---

@@ -1154,29 +1158,29 @@

 static bool __need_flush_quota(struct f2fs_sb_info *sbi)
 {
 	bool ret = false;

 	if (!is_journalled_quota(sbi))
 		return false;

-	if (!down_write_trylock(&sbi->quota_sem))
+	if (!f2fs_down_write_trylock(&sbi->quota_sem))
 		return true;
 	if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
 		ret = false;
 	} else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) {
 		ret = false;
 	} else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)) {
 		clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
 		ret = true;
 	} else if (get_pages(sbi, F2FS_DIRTY_QDATA)) {
 		ret = true;
 	}
-	up_write(&sbi->quota_sem);
+	f2fs_up_write(&sbi->quota_sem);
 	return ret;
 }

 /*
  * Freeze all the FS-operations for checkpoint.
  */
 static int block_operations(struct f2fs_sb_info *sbi)
 {
--- 40 unchanged lines hidden ---

@@ -1223,57 +1227,57 @@
 		cond_resched();
 		goto retry_flush_quotas;
 	}

 	/*
 	 * POR: we should ensure that there are no dirty node pages
 	 * until finishing nat/sit flush. inode->i_blocks can be updated.
 	 */
-	down_write(&sbi->node_change);
+	f2fs_down_write(&sbi->node_change);

 	if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
-		up_write(&sbi->node_change);
+		f2fs_up_write(&sbi->node_change);
 		f2fs_unlock_all(sbi);
 		err = f2fs_sync_inode_meta(sbi);
 		if (err)
 			return err;
 		cond_resched();
 		goto retry_flush_quotas;
 	}

 retry_flush_nodes:
-	down_write(&sbi->node_write);
+	f2fs_down_write(&sbi->node_write);

 	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
-		up_write(&sbi->node_write);
+		f2fs_up_write(&sbi->node_write);
 		atomic_inc(&sbi->wb_sync_req[NODE]);
 		err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
 		atomic_dec(&sbi->wb_sync_req[NODE]);
 		if (err) {
-			up_write(&sbi->node_change);
+			f2fs_up_write(&sbi->node_change);
 			f2fs_unlock_all(sbi);
 			return err;
 		}
 		cond_resched();
 		goto retry_flush_nodes;
 	}

 	/*
 	 * sbi->node_change is used only for AIO write_begin path which produces
 	 * dirty node blocks and some checkpoint values by block allocation.
 	 */
 	__prepare_cp_block(sbi);
-	up_write(&sbi->node_change);
+	f2fs_up_write(&sbi->node_change);
 	return err;
 }

 static void unblock_operations(struct f2fs_sb_info *sbi)
 {
-	up_write(&sbi->node_write);
+	f2fs_up_write(&sbi->node_write);
 	f2fs_unlock_all(sbi);
 }

 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
 {
 	DEFINE_WAIT(wait);

 	for (;;) {
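block_operations() keeps its drain-and-retry structure: take a write lock, re-check whether dirty quota, inode-meta or node pages slipped in, and if so drop the lock, flush, and jump back to a retry label; only the lock primitives change in the hunk above. A stripped-down sketch of that idiom, with hypothetical placeholder names standing in for the f2fs-specific pieces, is:

/*
 * Sketch of the drain-and-retry idiom used by block_operations(); the
 * struct and the *_dirty_work() helpers are hypothetical placeholders.
 */
struct flush_target {
	struct f2fs_rwsem lock;
};

static void freeze_target(struct flush_target *t)
{
retry:
	f2fs_down_write(&t->lock);
	if (target_has_dirty_work(t)) {
		/* cannot flush while holding the lock the flushers need */
		f2fs_up_write(&t->lock);
		flush_target_dirty_work(t);
		cond_resched();
		goto retry;
	}
	/* return with the lock held: no new dirty state during checkpoint */
}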
--- 327 unchanged lines hidden ---

@@ -1607,17 +1611,17 @@
 		return -EROFS;

 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
 		if (cpc->reason != CP_PAUSE)
 			return 0;
 		f2fs_warn(sbi, "Start checkpoint disabled!");
 	}
 	if (cpc->reason != CP_RESIZE)
-		down_write(&sbi->cp_global_sem);
+		f2fs_down_write(&sbi->cp_global_sem);

 	if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
 		((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
 		((cpc->reason & CP_DISCARD) && !sbi->discard_blks)))
 		goto out;
 	if (unlikely(f2fs_cp_error(sbi))) {
 		err = -EIO;
 		goto out;
--- 64 unchanged lines hidden ---

@@ -1688,17 +1692,17 @@
 	if (cpc->reason & CP_RECOVERY)
 		f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);

 	/* update CP_TIME to trigger checkpoint periodically */
 	f2fs_update_time(sbi, CP_TIME);
 	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
 out:
 	if (cpc->reason != CP_RESIZE)
-		up_write(&sbi->cp_global_sem);
+		f2fs_up_write(&sbi->cp_global_sem);
 	return err;
 }

 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi)
 {
 	int i;

 	for (i = 0; i < MAX_INO_ENTRY; i++) {
--- 31 unchanged lines hidden ---

@@ -1736,19 +1740,19 @@
 	kmem_cache_destroy(f2fs_inode_entry_slab);
 }

 static int __write_checkpoint_sync(struct f2fs_sb_info *sbi)
 {
 	struct cp_control cpc = { .reason = CP_SYNC, };
 	int err;

-	down_write(&sbi->gc_lock);
+	f2fs_down_write(&sbi->gc_lock);
 	err = f2fs_write_checkpoint(sbi, &cpc);
-	up_write(&sbi->gc_lock);
+	f2fs_up_write(&sbi->gc_lock);

 	return err;
 }

 static void __checkpoint_and_complete_reqs(struct f2fs_sb_info *sbi)
 {
 	struct ckpt_req_control *cprc = &sbi->cprc_info;
 	struct ckpt_req *req, *next;
--- 71 unchanged lines hidden ---

@@ -1826,19 +1830,19 @@
 	struct ckpt_req_control *cprc = &sbi->cprc_info;
 	struct ckpt_req req;
 	struct cp_control cpc;

 	cpc.reason = __get_cp_reason(sbi);
 	if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) {
 		int ret;

-		down_write(&sbi->gc_lock);
+		f2fs_down_write(&sbi->gc_lock);
 		ret = f2fs_write_checkpoint(sbi, &cpc);
-		up_write(&sbi->gc_lock);
+		f2fs_up_write(&sbi->gc_lock);

 		return ret;
 	}

 	if (!cprc->f2fs_issue_ckpt)
 		return __write_checkpoint_sync(sbi);

 	init_ckpt_req(&req);

--- 68 unchanged lines hidden ---
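Both checkpoint entry points in the last two hunks, __write_checkpoint_sync() and the non-merged path of f2fs_issue_checkpoint(), wrap f2fs_write_checkpoint() in a write lock on sbi->gc_lock, so a checkpoint cannot race with garbage collection, which takes the same lock. Condensed to its core (error handling and the checkpoint-merge daemon path omitted, function name mine), the pattern is:

/* Condensed from __write_checkpoint_sync() above; a sketch, not a new API. */
static int checkpoint_excluding_gc(struct f2fs_sb_info *sbi)
{
	struct cp_control cpc = { .reason = CP_SYNC, };
	int err;

	f2fs_down_write(&sbi->gc_lock);	/* keep GC out while we checkpoint */
	err = f2fs_write_checkpoint(sbi, &cpc);
	f2fs_up_write(&sbi->gc_lock);

	return err;
}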