--- checkpoint.c (1d1df41c5a33359a00e919d54eaebfb789711fdc)
+++ checkpoint.c (e4544b63a7ee49e7fbebf35ece0a6acd3b9617ae)
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * fs/f2fs/checkpoint.c
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 */
8#include <linux/fs.h>

--- 337 unchanged lines hidden ---

346
347 /* collect a number of dirty meta pages and write together */
348 if (wbc->sync_mode != WB_SYNC_ALL &&
349 get_pages(sbi, F2FS_DIRTY_META) <
350 nr_pages_to_skip(sbi, META))
351 goto skip_write;
352
353 /* if locked failed, cp will flush dirty pages instead */
-354		if (!down_write_trylock(&sbi->cp_global_sem))
+354		if (!f2fs_down_write_trylock(&sbi->cp_global_sem))
355 goto skip_write;
356
357 trace_f2fs_writepages(mapping->host, wbc, META);
358 diff = nr_pages_to_write(sbi, META, wbc);
359 written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
-360		up_write(&sbi->cp_global_sem);
+360		f2fs_up_write(&sbi->cp_global_sem);
361 wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
362 return 0;
363
364skip_write:
365 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
366 trace_f2fs_writepages(mapping->host, wbc, META);
367 return 0;
368}

--- 785 unchanged lines hidden ---

1154
1155static bool __need_flush_quota(struct f2fs_sb_info *sbi)
1156{
1157 bool ret = false;
1158
1159 if (!is_journalled_quota(sbi))
1160 return false;
1161
-1162		if (!down_write_trylock(&sbi->quota_sem))
+1162		if (!f2fs_down_write_trylock(&sbi->quota_sem))
1163 return true;
1164 if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
1165 ret = false;
1166 } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) {
1167 ret = false;
1168 } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)) {
1169 clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
1170 ret = true;
1171 } else if (get_pages(sbi, F2FS_DIRTY_QDATA)) {
1172 ret = true;
1173 }
-1174		up_write(&sbi->quota_sem);
+1174		f2fs_up_write(&sbi->quota_sem);
1175 return ret;
1176}
1177
1178/*
1179 * Freeze all the FS-operations for checkpoint.
1180 */
1181static int block_operations(struct f2fs_sb_info *sbi)
1182{

--- 40 unchanged lines hidden ---

1223 cond_resched();
1224 goto retry_flush_quotas;
1225 }
1226
1227 /*
1228 * POR: we should ensure that there are no dirty node pages
1229 * until finishing nat/sit flush. inode->i_blocks can be updated.
1230 */
-1231		down_write(&sbi->node_change);
+1231		f2fs_down_write(&sbi->node_change);
1232
1233 if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
-1234		up_write(&sbi->node_change);
+1234		f2fs_up_write(&sbi->node_change);
1235 f2fs_unlock_all(sbi);
1236 err = f2fs_sync_inode_meta(sbi);
1237 if (err)
1238 return err;
1239 cond_resched();
1240 goto retry_flush_quotas;
1241 }
1242
1243retry_flush_nodes:
-1244	down_write(&sbi->node_write);
+1244	f2fs_down_write(&sbi->node_write);
1245
1246 if (get_pages(sbi, F2FS_DIRTY_NODES)) {
-1247		up_write(&sbi->node_write);
+1247		f2fs_up_write(&sbi->node_write);
1248 atomic_inc(&sbi->wb_sync_req[NODE]);
1249 err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
1250 atomic_dec(&sbi->wb_sync_req[NODE]);
1251 if (err) {
-1252			up_write(&sbi->node_change);
+1252			f2fs_up_write(&sbi->node_change);
1253 f2fs_unlock_all(sbi);
1254 return err;
1255 }
1256 cond_resched();
1257 goto retry_flush_nodes;
1258 }
1259
1260 /*
1261 * sbi->node_change is used only for AIO write_begin path which produces
1262 * dirty node blocks and some checkpoint values by block allocation.
1263 */
1264 __prepare_cp_block(sbi);
-1265	up_write(&sbi->node_change);
+1265	f2fs_up_write(&sbi->node_change);
1266 return err;
1267}
1268
1269static void unblock_operations(struct f2fs_sb_info *sbi)
1270{
-1271	up_write(&sbi->node_write);
+1271	f2fs_up_write(&sbi->node_write);
1272 f2fs_unlock_all(sbi);
1273}
1274
1275void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
1276{
1277 DEFINE_WAIT(wait);
1278
1279 for (;;) {

--- 327 unchanged lines hidden ---

1607 return -EROFS;
1608
1609 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
1610 if (cpc->reason != CP_PAUSE)
1611 return 0;
1612 f2fs_warn(sbi, "Start checkpoint disabled!");
1613 }
1614 if (cpc->reason != CP_RESIZE)
-1615		down_write(&sbi->cp_global_sem);
+1615		f2fs_down_write(&sbi->cp_global_sem);
1616
1617 if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
1618 ((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
1619 ((cpc->reason & CP_DISCARD) && !sbi->discard_blks)))
1620 goto out;
1621 if (unlikely(f2fs_cp_error(sbi))) {
1622 err = -EIO;
1623 goto out;

--- 64 unchanged lines hidden ---

1688 if (cpc->reason & CP_RECOVERY)
1689 f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);
1690
1691 /* update CP_TIME to trigger checkpoint periodically */
1692 f2fs_update_time(sbi, CP_TIME);
1693 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
1694out:
1695 if (cpc->reason != CP_RESIZE)
-1696		up_write(&sbi->cp_global_sem);
+1696		f2fs_up_write(&sbi->cp_global_sem);
1697 return err;
1698}
1699
1700void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi)
1701{
1702 int i;
1703
1704 for (i = 0; i < MAX_INO_ENTRY; i++) {

--- 31 unchanged lines hidden ---

1736 kmem_cache_destroy(f2fs_inode_entry_slab);
1737}
1738
1739static int __write_checkpoint_sync(struct f2fs_sb_info *sbi)
1740{
1741 struct cp_control cpc = { .reason = CP_SYNC, };
1742 int err;
1743
-1744	down_write(&sbi->gc_lock);
+1744	f2fs_down_write(&sbi->gc_lock);
1745 err = f2fs_write_checkpoint(sbi, &cpc);
-1746	up_write(&sbi->gc_lock);
+1746	f2fs_up_write(&sbi->gc_lock);
1747
1748 return err;
1749}
1750
1751static void __checkpoint_and_complete_reqs(struct f2fs_sb_info *sbi)
1752{
1753 struct ckpt_req_control *cprc = &sbi->cprc_info;
1754 struct ckpt_req *req, *next;

--- 71 unchanged lines hidden ---

1826 struct ckpt_req_control *cprc = &sbi->cprc_info;
1827 struct ckpt_req req;
1828 struct cp_control cpc;
1829
1830 cpc.reason = __get_cp_reason(sbi);
1831 if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) {
1832 int ret;
1833
-1834		down_write(&sbi->gc_lock);
+1834		f2fs_down_write(&sbi->gc_lock);
1835 ret = f2fs_write_checkpoint(sbi, &cpc);
-1836		up_write(&sbi->gc_lock);
+1836		f2fs_up_write(&sbi->gc_lock);
1837
1838 return ret;
1839 }
1840
1841 if (!cprc->f2fs_issue_ckpt)
1842 return __write_checkpoint_sync(sbi);
1843
1844 init_ckpt_req(&req);

--- 68 unchanged lines hidden ---
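
Note: every hunk above follows the same mechanical pattern: a bare rw_semaphore call (down_write, down_write_trylock, up_write) on a field of struct f2fs_sb_info is replaced by an f2fs_-prefixed wrapper taking the same lock argument. The diff does not show how those wrappers are defined; the sketch below is only an illustration of the minimal shape they could take, assuming an f2fs_rwsem type that embeds a plain kernel rw_semaphore. The field name internal_rwsem and the init_f2fs_rwsem helper are hypothetical here, not copied from the f2fs headers.

/*
 * Sketch only: a wrapper type that simply forwards to the ordinary
 * kernel rw_semaphore API. Field and helper names are illustrative.
 */
#include <linux/rwsem.h>

struct f2fs_rwsem {
	struct rw_semaphore internal_rwsem;	/* hypothetical field name */
};

#define init_f2fs_rwsem(sem)	init_rwsem(&(sem)->internal_rwsem)

static inline void f2fs_down_write(struct f2fs_rwsem *sem)
{
	down_write(&sem->internal_rwsem);
}

static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
{
	return down_write_trylock(&sem->internal_rwsem);
}

static inline void f2fs_up_write(struct f2fs_rwsem *sem)
{
	up_write(&sem->internal_rwsem);
}

With wrappers along these lines, the call sites shown above compile unchanged once the corresponding f2fs_sb_info fields (cp_global_sem, quota_sem, node_change, node_write, gc_lock) are declared as struct f2fs_rwsem; the indirection gives the filesystem a single place to adjust its locking behaviour without touching every caller.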