/* node.c: diff between commits 8c57a5e7b2820f349c95b8c8393fec1e0f4070d2 (old) and 70246286e94c335b5bea0cbc68a17a96dd620281 (new) */
1/*
2 * fs/f2fs/node.c
3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as

--- 393 unchanged lines hidden (view full) ---

402cache:
403 up_read(&nm_i->nat_tree_lock);
404 /* cache nat entry */
405 down_write(&nm_i->nat_tree_lock);
406 cache_nat_entry(sbi, nid, &ne);
407 up_write(&nm_i->nat_tree_lock);
408}
409
1/*
2 * fs/f2fs/node.c
3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as

--- 393 unchanged lines hidden (view full) ---

402cache:
403 up_read(&nm_i->nat_tree_lock);
404 /* cache nat entry */
405 down_write(&nm_i->nat_tree_lock);
406 cache_nat_entry(sbi, nid, &ne);
407 up_write(&nm_i->nat_tree_lock);
408}
409
410/*
411 * readahead MAX_RA_NODE number of node pages.
412 */
413static void ra_node_pages(struct page *parent, int start, int n)
414{
415 struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
416 struct blk_plug plug;
417 int i, end;
418 nid_t nid;
419
420 blk_start_plug(&plug);
421
422 /* Then, try readahead for siblings of the desired node */
423 end = start + n;
424 end = min(end, NIDS_PER_BLOCK);
425 for (i = start; i < end; i++) {
426 nid = get_nid(parent, i, false);
427 ra_node_page(sbi, nid);
428 }
429
430 blk_finish_plug(&plug);
431}
432
410pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
411{
412 const long direct_index = ADDRS_PER_INODE(dn->inode);
413 const long direct_blks = ADDRS_PER_BLOCK;
414 const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
415 unsigned int skipped_unit = ADDRS_PER_BLOCK;
416 int cur_level = dn->cur_level;
417 int max_level = dn->max_level;

--- 284 unchanged lines hidden (view full) ---

702 trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
703
704 page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
705 if (IS_ERR(page)) {
706 trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
707 return PTR_ERR(page);
708 }
709
433pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
434{
435 const long direct_index = ADDRS_PER_INODE(dn->inode);
436 const long direct_blks = ADDRS_PER_BLOCK;
437 const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
438 unsigned int skipped_unit = ADDRS_PER_BLOCK;
439 int cur_level = dn->cur_level;
440 int max_level = dn->max_level;

--- 284 unchanged lines hidden (view full) ---

725 trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
726
727 page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
728 if (IS_ERR(page)) {
729 trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
730 return PTR_ERR(page);
731 }
732
733 ra_node_pages(page, ofs, NIDS_PER_BLOCK);
734
710 rn = F2FS_NODE(page);
711 if (depth < 3) {
712 for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
713 child_nid = le32_to_cpu(rn->in.nid[i]);
714 if (child_nid == 0)
715 continue;
716 rdn.nid = child_nid;
717 ret = truncate_dnode(&rdn);

--- 61 unchanged lines hidden (view full) ---

779 if (IS_ERR(pages[i])) {
780 err = PTR_ERR(pages[i]);
781 idx = i - 1;
782 goto fail;
783 }
784 nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
785 }
786
735 rn = F2FS_NODE(page);
736 if (depth < 3) {
737 for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
738 child_nid = le32_to_cpu(rn->in.nid[i]);
739 if (child_nid == 0)
740 continue;
741 rdn.nid = child_nid;
742 ret = truncate_dnode(&rdn);

--- 61 unchanged lines hidden (view full) ---

804 if (IS_ERR(pages[i])) {
805 err = PTR_ERR(pages[i]);
806 idx = i - 1;
807 goto fail;
808 }
809 nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
810 }
811
812 ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
813
787 /* free direct nodes linked to a partial indirect node */
788 for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
789 child_nid = get_nid(pages[idx], i, false);
790 if (!child_nid)
791 continue;
792 dn->nid = child_nid;
793 err = truncate_dnode(dn);
794 if (err < 0)

--- 32 unchanged lines hidden (view full) ---

827 unsigned int nofs = 0;
828 struct f2fs_inode *ri;
829 struct dnode_of_data dn;
830 struct page *page;
831
832 trace_f2fs_truncate_inode_blocks_enter(inode, from);
833
834 level = get_node_path(inode, from, offset, noffset);
814 /* free direct nodes linked to a partial indirect node */
815 for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
816 child_nid = get_nid(pages[idx], i, false);
817 if (!child_nid)
818 continue;
819 dn->nid = child_nid;
820 err = truncate_dnode(dn);
821 if (err < 0)

--- 32 unchanged lines hidden (view full) ---

854 unsigned int nofs = 0;
855 struct f2fs_inode *ri;
856 struct dnode_of_data dn;
857 struct page *page;
858
859 trace_f2fs_truncate_inode_blocks_enter(inode, from);
860
861 level = get_node_path(inode, from, offset, noffset);
835restart:
862
836 page = get_node_page(sbi, inode->i_ino);
837 if (IS_ERR(page)) {
838 trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
839 return PTR_ERR(page);
840 }
841
842 set_new_dnode(&dn, inode, page, NULL, 0);
843 unlock_page(page);

--- 47 unchanged lines hidden (view full) ---

891 default:
892 BUG();
893 }
894 if (err < 0 && err != -ENOENT)
895 goto fail;
896 if (offset[1] == 0 &&
897 ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
898 lock_page(page);
863 page = get_node_page(sbi, inode->i_ino);
864 if (IS_ERR(page)) {
865 trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
866 return PTR_ERR(page);
867 }
868
869 set_new_dnode(&dn, inode, page, NULL, 0);
870 unlock_page(page);

--- 47 unchanged lines hidden (view full) ---

918 default:
919 BUG();
920 }
921 if (err < 0 && err != -ENOENT)
922 goto fail;
923 if (offset[1] == 0 &&
924 ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
925 lock_page(page);
899 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
900 f2fs_put_page(page, 1);
901 goto restart;
902 }
926 BUG_ON(page->mapping != NODE_MAPPING(sbi));
903 f2fs_wait_on_page_writeback(page, NODE, true);
904 ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
905 set_page_dirty(page);
906 unlock_page(page);
907 }
908 offset[1] = 0;
909 offset[0]++;
910 nofs += err;

--- 82 unchanged lines hidden (view full) ---

993 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
994 struct node_info old_ni, new_ni;
995 struct page *page;
996 int err;
997
998 if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
999 return ERR_PTR(-EPERM);
1000
927 f2fs_wait_on_page_writeback(page, NODE, true);
928 ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
929 set_page_dirty(page);
930 unlock_page(page);
931 }
932 offset[1] = 0;
933 offset[0]++;
934 nofs += err;

--- 82 unchanged lines hidden (view full) ---

1017 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1018 struct node_info old_ni, new_ni;
1019 struct page *page;
1020 int err;
1021
1022 if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
1023 return ERR_PTR(-EPERM);
1024
1001 page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
1025 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
1002 if (!page)
1003 return ERR_PTR(-ENOMEM);
1004
1005 if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
1006 err = -ENOSPC;
1007 goto fail;
1008 }
1009

--- 31 unchanged lines hidden (view full) ---

1041 return ERR_PTR(err);
1042}
1043
1044/*
1045 * Caller should do after getting the following values.
1046 * 0: f2fs_put_page(page, 0)
1047 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
1048 */
1026 if (!page)
1027 return ERR_PTR(-ENOMEM);
1028
1029 if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
1030 err = -ENOSPC;
1031 goto fail;
1032 }
1033

--- 31 unchanged lines hidden (view full) ---

1065 return ERR_PTR(err);
1066}
1067
1068/*
1069 * Caller should do after getting the following values.
1070 * 0: f2fs_put_page(page, 0)
1071 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
1072 */
1049static int read_node_page(struct page *page, int rw)
1073static int read_node_page(struct page *page, int op_flags)
1050{
1051 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1052 struct node_info ni;
1053 struct f2fs_io_info fio = {
1054 .sbi = sbi,
1055 .type = NODE,
1074{
1075 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1076 struct node_info ni;
1077 struct f2fs_io_info fio = {
1078 .sbi = sbi,
1079 .type = NODE,
1056 .rw = rw,
1080 .op = REQ_OP_READ,
1081 .op_flags = op_flags,
1057 .page = page,
1058 .encrypted_page = NULL,
1059 };
1060
1061 get_node_info(sbi, page->index, &ni);
1062
1063 if (unlikely(ni.blk_addr == NULL_ADDR)) {
1064 ClearPageUptodate(page);

--- 20 unchanged lines hidden (view full) ---

1085 f2fs_bug_on(sbi, check_nid_range(sbi, nid));
1086
1087 rcu_read_lock();
1088 apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
1089 rcu_read_unlock();
1090 if (apage)
1091 return;
1092
1082 .page = page,
1083 .encrypted_page = NULL,
1084 };
1085
1086 get_node_info(sbi, page->index, &ni);
1087
1088 if (unlikely(ni.blk_addr == NULL_ADDR)) {
1089 ClearPageUptodate(page);

--- 20 unchanged lines hidden (view full) ---

1110 f2fs_bug_on(sbi, check_nid_range(sbi, nid));
1111
1112 rcu_read_lock();
1113 apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
1114 rcu_read_unlock();
1115 if (apage)
1116 return;
1117
1093 apage = grab_cache_page(NODE_MAPPING(sbi), nid);
1118 apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1094 if (!apage)
1095 return;
1096
1119 if (!apage)
1120 return;
1121
1097 err = read_node_page(apage, READA);
1122 err = read_node_page(apage, REQ_RAHEAD);
1098 f2fs_put_page(apage, err ? 1 : 0);
1099}
1100
1123 f2fs_put_page(apage, err ? 1 : 0);
1124}
1125
1101/*
1102 * readahead MAX_RA_NODE number of node pages.
1103 */
1104static void ra_node_pages(struct page *parent, int start)
1105{
1106 struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
1107 struct blk_plug plug;
1108 int i, end;
1109 nid_t nid;
1110
1111 blk_start_plug(&plug);
1112
1113 /* Then, try readahead for siblings of the desired node */
1114 end = start + MAX_RA_NODE;
1115 end = min(end, NIDS_PER_BLOCK);
1116 for (i = start; i < end; i++) {
1117 nid = get_nid(parent, i, false);
1118 ra_node_page(sbi, nid);
1119 }
1120
1121 blk_finish_plug(&plug);
1122}
1123
1124static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
1125 struct page *parent, int start)
1126{
1127 struct page *page;
1128 int err;
1129
1130 if (!nid)
1131 return ERR_PTR(-ENOENT);
1132 f2fs_bug_on(sbi, check_nid_range(sbi, nid));
1133repeat:
1126static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
1127 struct page *parent, int start)
1128{
1129 struct page *page;
1130 int err;
1131
1132 if (!nid)
1133 return ERR_PTR(-ENOENT);
1134 f2fs_bug_on(sbi, check_nid_range(sbi, nid));
1135repeat:
1134 page = grab_cache_page(NODE_MAPPING(sbi), nid);
1136 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1135 if (!page)
1136 return ERR_PTR(-ENOMEM);
1137
1138 err = read_node_page(page, READ_SYNC);
1139 if (err < 0) {
1140 f2fs_put_page(page, 1);
1141 return ERR_PTR(err);
1142 } else if (err == LOCKED_PAGE) {
1143 goto page_hit;
1144 }
1145
1146 if (parent)
1137 if (!page)
1138 return ERR_PTR(-ENOMEM);
1139
1140 err = read_node_page(page, READ_SYNC);
1141 if (err < 0) {
1142 f2fs_put_page(page, 1);
1143 return ERR_PTR(err);
1144 } else if (err == LOCKED_PAGE) {
1145 goto page_hit;
1146 }
1147
1148 if (parent)
1147 ra_node_pages(parent, start + 1);
1149 ra_node_pages(parent, start + 1, MAX_RA_NODE);
1148
1149 lock_page(page);
1150
1151 if (unlikely(!PageUptodate(page))) {
1152 f2fs_put_page(page, 1);
1153 return ERR_PTR(-EIO);
1154 }
1155 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {

--- 35 unchanged lines hidden (view full) ---

1191 }
1192 dn->node_changed = ret ? true: false;
1193}
1194
1195static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
1196{
1197 struct inode *inode;
1198 struct page *page;
1150
1151 lock_page(page);
1152
1153 if (unlikely(!PageUptodate(page))) {
1154 f2fs_put_page(page, 1);
1155 return ERR_PTR(-EIO);
1156 }
1157 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {

--- 35 unchanged lines hidden (view full) ---

1193 }
1194 dn->node_changed = ret ? true: false;
1195}
1196
1197static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
1198{
1199 struct inode *inode;
1200 struct page *page;
1201 int ret;
1199
1200 /* should flush inline_data before evict_inode */
1201 inode = ilookup(sbi->sb, ino);
1202 if (!inode)
1203 return;
1204
1202
1203 /* should flush inline_data before evict_inode */
1204 inode = ilookup(sbi->sb, ino);
1205 if (!inode)
1206 return;
1207
1205 page = pagecache_get_page(inode->i_mapping, 0, FGP_NOWAIT, 0);
1208 page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0);
1206 if (!page)
1207 goto iput_out;
1208
1209 if (!page)
1210 goto iput_out;
1211
1209 if (!trylock_page(page))
1210 goto release_out;
1211
1212 if (!PageUptodate(page))
1213 goto page_out;
1214
1215 if (!PageDirty(page))
1216 goto page_out;
1217
1218 if (!clear_page_dirty_for_io(page))
1219 goto page_out;
1220
1212 if (!PageUptodate(page))
1213 goto page_out;
1214
1215 if (!PageDirty(page))
1216 goto page_out;
1217
1218 if (!clear_page_dirty_for_io(page))
1219 goto page_out;
1220
1221 if (!f2fs_write_inline_data(inode, page))
1222 inode_dec_dirty_pages(inode);
1223 else
1221 ret = f2fs_write_inline_data(inode, page);
1222 inode_dec_dirty_pages(inode);
1223 if (ret)
1224 set_page_dirty(page);
1225page_out:
1224 set_page_dirty(page);
1225page_out:
1226 unlock_page(page);
1227release_out:
1228 f2fs_put_page(page, 0);
1226 f2fs_put_page(page, 1);
1229iput_out:
1230 iput(inode);
1231}
1232
1227iput_out:
1228 iput(inode);
1229}
1230
1233int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
1234 struct writeback_control *wbc)
/*
 * Write out (FG_GC) or just re-dirty (otherwise) one node page for GC.
 *
 * FG_GC: the page is written back synchronously via a one-page
 * WB_SYNC_ALL writeback so foreground GC makes progress immediately.
 * Otherwise: the page is only marked dirty and left to the flusher.
 *
 * The page is unlocked and its reference dropped before returning, so
 * the caller is expected to pass it in locked with a reference held.
 * NOTE(review): the stray second "{" line below looks like diff-extraction
 * residue from the removed sync_node_pages() signature — verify against
 * the real tree.
 */
1231void move_node_page(struct page *node_page, int gc_type)
1235{
1232{
1233	if (gc_type == FG_GC) {
1234		struct f2fs_sb_info *sbi = F2FS_P_SB(node_page);
		/* single-page synchronous writeback for foreground GC */
1235		struct writeback_control wbc = {
1236			.sync_mode = WB_SYNC_ALL,
1237			.nr_to_write = 1,
1238			.for_reclaim = 0,
1239		};
1240
1241		set_page_dirty(node_page);
		/* wait for any in-flight writeback before issuing our own */
1242		f2fs_wait_on_page_writeback(node_page, NODE, true);
1243
1244		f2fs_bug_on(sbi, PageWriteback(node_page));
1245		if (!clear_page_dirty_for_io(node_page))
1246			goto out_page;
1247
		/* a failed ->writepage leaves the page locked; unlock it here */
1248		if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc))
1249			unlock_page(node_page);
1250		goto release_page;
1251	} else {
1252		/* set page dirty and write it */
1253		if (!PageWriteback(node_page))
1254			set_page_dirty(node_page);
1255	}
1256out_page:
1257	unlock_page(node_page);
1258release_page:
1259	f2fs_put_page(node_page, 0);
1260}
1261
/*
 * Scan all dirty node pages and return the last dirty direct-node page
 * belonging to inode @ino (cold dnodes only), with an extra page
 * reference held, or NULL if none is found.  Returns ERR_PTR(-EIO)
 * when a checkpoint error is detected mid-scan.
 *
 * NOTE(review): the duplicated index/pvec declarations and the unused
 * "int step" line below appear to be diff-extraction residue from the
 * removed sync_node_pages() variant — verify against the real tree.
 */
1262static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
1263{
1236	pgoff_t index, end;
1237	struct pagevec pvec;
1264	pgoff_t index, end;
1265	struct pagevec pvec;
1238	int step = ino ? 2 : 0;
1266	struct page *last_page = NULL;
1267
1268	pagevec_init(&pvec, 0);
1269	index = 0;
1270	end = ULONG_MAX;
1271
	/* walk the node mapping's dirty tag, one pagevec batch at a time */
1272	while (index <= end) {
1273		int i, nr_pages;
1274		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1275				PAGECACHE_TAG_DIRTY,
1276				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1277		if (nr_pages == 0)
1278			break;
1279
1280		for (i = 0; i < nr_pages; i++) {
1281			struct page *page = pvec.pages[i];
1282
			/* abort scan on checkpoint error; drop held ref */
1283			if (unlikely(f2fs_cp_error(sbi))) {
1284				f2fs_put_page(last_page, 0);
1285				pagevec_release(&pvec);
1286				return ERR_PTR(-EIO);
1287			}
1288
			/* cheap unlocked filters before taking the page lock */
1289			if (!IS_DNODE(page) || !is_cold_node(page))
1290				continue;
1291			if (ino_of_node(page) != ino)
1292				continue;
1293
1294			lock_page(page);
1295
			/* page may have been truncated/migrated while unlocked */
1296			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1297continue_unlock:
1298				unlock_page(page);
1299				continue;
1300			}
			/* re-check ownership under the lock */
1301			if (ino_of_node(page) != ino)
1302				goto continue_unlock;
1303
1304			if (!PageDirty(page)) {
1305				/* someone wrote it for us */
1306				goto continue_unlock;
1307			}
1308
			/* found a newer candidate: release the previous one */
1309			if (last_page)
1310				f2fs_put_page(last_page, 0);
1311
1312			get_page(page);
1313			last_page = page;
1314			unlock_page(page);
1315		}
1316		pagevec_release(&pvec);
1317		cond_resched();
1318	}
1319	return last_page;
1320}
1321
/*
 * Write back all dirty direct-node (cold) pages of inode @ino for fsync.
 *
 * When @atomic is true, only the last dirty dnode page carries the fsync
 * mark (found first via last_fsync_dnode()); if that marked page does not
 * make it to disk in one pass, it is re-dirtied and the whole scan is
 * retried until the mark is written.  When @atomic is false, every dnode
 * page written gets the fsync mark.
 *
 * Returns 0 on success, -EIO on writeback or checkpoint error.
 */
1322int fsync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
1323			struct writeback_control *wbc, bool atomic)
1324{
1325	pgoff_t index, end;
1326	struct pagevec pvec;
1327	int ret = 0;
	/* extra ref on the marked page is held across the retry loop */
1328	struct page *last_page = NULL;
1329	bool marked = false;
1330
1331	if (atomic) {
1332		last_page = last_fsync_dnode(sbi, ino);
		/* NULL means nothing dirty; ERR_PTR propagates -EIO */
1333		if (IS_ERR_OR_NULL(last_page))
1334			return PTR_ERR_OR_ZERO(last_page);
1335	}
1336retry:
1337	pagevec_init(&pvec, 0);
1338	index = 0;
1339	end = ULONG_MAX;
1340
	/* walk the node mapping's dirty tag, one pagevec batch at a time */
1341	while (index <= end) {
1342		int i, nr_pages;
1343		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1344				PAGECACHE_TAG_DIRTY,
1345				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1346		if (nr_pages == 0)
1347			break;
1348
1349		for (i = 0; i < nr_pages; i++) {
1350			struct page *page = pvec.pages[i];
1351
			/* bail out on checkpoint error, dropping our ref */
1352			if (unlikely(f2fs_cp_error(sbi))) {
1353				f2fs_put_page(last_page, 0);
1354				pagevec_release(&pvec);
1355				return -EIO;
1356			}
1357
			/* cheap unlocked filters before taking the page lock */
1358			if (!IS_DNODE(page) || !is_cold_node(page))
1359				continue;
1360			if (ino_of_node(page) != ino)
1361				continue;
1362
1363			lock_page(page);
1364
			/* page may have been truncated/migrated while unlocked */
1365			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1366continue_unlock:
1367				unlock_page(page);
1368				continue;
1369			}
			/* re-check ownership under the lock */
1370			if (ino_of_node(page) != ino)
1371				goto continue_unlock;
1372
			/* the marked page must be processed even if now clean */
1373			if (!PageDirty(page) && page != last_page) {
1374				/* someone wrote it for us */
1375				goto continue_unlock;
1376			}
1377
1378			f2fs_wait_on_page_writeback(page, NODE, true);
1379			BUG_ON(PageWriteback(page));
1380
			/* mark every dnode (non-atomic) or just the last one */
1381			if (!atomic || page == last_page) {
1382				set_fsync_mark(page, 1);
1383				if (IS_INODE(page))
1384					set_dentry_mark(page,
1385						need_dentry_mark(sbi, ino));
1386				/* may be written by other thread */
1387				if (!PageDirty(page))
1388					set_page_dirty(page);
1389			}
1390
1391			if (!clear_page_dirty_for_io(page))
1392				goto continue_unlock;
1393
1394			ret = NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
			/* failed ->writepage leaves the page locked: unlock */
1395			if (ret) {
1396				unlock_page(page);
1397				f2fs_put_page(last_page, 0);
1398				break;
1399			}
			/* marked page submitted: done with this pass */
1400			if (page == last_page) {
1401				f2fs_put_page(page, 0);
1402				marked = true;
1403				break;
1404			}
1405		}
1406		pagevec_release(&pvec);
1407		cond_resched();
1408
1409		if (ret || marked)
1410			break;
1411	}
	/* atomic mode: retry until the fsync mark actually went out */
1412	if (!ret && atomic && !marked) {
1413		f2fs_msg(sbi->sb, KERN_DEBUG,
1414			"Retry to write fsync mark: ino=%u, idx=%lx",
1415					ino, last_page->index);
1416		lock_page(last_page);
1417		set_page_dirty(last_page);
1418		unlock_page(last_page);
1419		goto retry;
1420	}
1421	return ret ? -EIO: 0;
1422}
1423
1424int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc)
1425{
1426 pgoff_t index, end;
1427 struct pagevec pvec;
1428 int step = 0;
1239 int nwritten = 0;
1240
1241 pagevec_init(&pvec, 0);
1242
1243next_step:
1244 index = 0;
1245 end = ULONG_MAX;
1246

--- 22 unchanged lines hidden (view full) ---

1269 if (step == 0 && IS_DNODE(page))
1270 continue;
1271 if (step == 1 && (!IS_DNODE(page) ||
1272 is_cold_node(page)))
1273 continue;
1274 if (step == 2 && (!IS_DNODE(page) ||
1275 !is_cold_node(page)))
1276 continue;
1429 int nwritten = 0;
1430
1431 pagevec_init(&pvec, 0);
1432
1433next_step:
1434 index = 0;
1435 end = ULONG_MAX;
1436

--- 22 unchanged lines hidden (view full) ---

1459 if (step == 0 && IS_DNODE(page))
1460 continue;
1461 if (step == 1 && (!IS_DNODE(page) ||
1462 is_cold_node(page)))
1463 continue;
1464 if (step == 2 && (!IS_DNODE(page) ||
1465 !is_cold_node(page)))
1466 continue;
1277
1278 /*
1279 * If an fsync mode,
1280 * we should not skip writing node pages.
1281 */
1282lock_node:
1467lock_node:
1283 if (ino && ino_of_node(page) == ino)
1284 lock_page(page);
1285 else if (!trylock_page(page))
1468 if (!trylock_page(page))
1286 continue;
1287
1288 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1289continue_unlock:
1290 unlock_page(page);
1291 continue;
1292 }
1469 continue;
1470
1471 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1472continue_unlock:
1473 unlock_page(page);
1474 continue;
1475 }
1293 if (ino && ino_of_node(page) != ino)
1294 goto continue_unlock;
1295
1296 if (!PageDirty(page)) {
1297 /* someone wrote it for us */
1298 goto continue_unlock;
1299 }
1300
1301 /* flush inline_data */
1476
1477 if (!PageDirty(page)) {
1478 /* someone wrote it for us */
1479 goto continue_unlock;
1480 }
1481
1482 /* flush inline_data */
1302 if (!ino && is_inline_node(page)) {
1483 if (is_inline_node(page)) {
1303 clear_inline_node(page);
1304 unlock_page(page);
1305 flush_inline_data(sbi, ino_of_node(page));
1306 goto lock_node;
1307 }
1308
1309 f2fs_wait_on_page_writeback(page, NODE, true);
1310
1311 BUG_ON(PageWriteback(page));
1312 if (!clear_page_dirty_for_io(page))
1313 goto continue_unlock;
1314
1484 clear_inline_node(page);
1485 unlock_page(page);
1486 flush_inline_data(sbi, ino_of_node(page));
1487 goto lock_node;
1488 }
1489
1490 f2fs_wait_on_page_writeback(page, NODE, true);
1491
1492 BUG_ON(PageWriteback(page));
1493 if (!clear_page_dirty_for_io(page))
1494 goto continue_unlock;
1495
1315 /* called by fsync() */
1316 if (ino && IS_DNODE(page)) {
1317 set_fsync_mark(page, 1);
1318 if (IS_INODE(page))
1319 set_dentry_mark(page,
1320 need_dentry_mark(sbi, ino));
1321 nwritten++;
1322 } else {
1323 set_fsync_mark(page, 0);
1324 set_dentry_mark(page, 0);
1325 }
1496 set_fsync_mark(page, 0);
1497 set_dentry_mark(page, 0);
1326
1327 if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
1328 unlock_page(page);
1329
1330 if (--wbc->nr_to_write == 0)
1331 break;
1332 }
1333 pagevec_release(&pvec);

--- 58 unchanged lines hidden (view full) ---

1392 struct writeback_control *wbc)
1393{
1394 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1395 nid_t nid;
1396 struct node_info ni;
1397 struct f2fs_io_info fio = {
1398 .sbi = sbi,
1399 .type = NODE,
1498
1499 if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
1500 unlock_page(page);
1501
1502 if (--wbc->nr_to_write == 0)
1503 break;
1504 }
1505 pagevec_release(&pvec);

--- 58 unchanged lines hidden (view full) ---

1564 struct writeback_control *wbc)
1565{
1566 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1567 nid_t nid;
1568 struct node_info ni;
1569 struct f2fs_io_info fio = {
1570 .sbi = sbi,
1571 .type = NODE,
1400 .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
1572 .op = REQ_OP_WRITE,
1573 .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
1401 .page = page,
1402 .encrypted_page = NULL,
1403 };
1404
1405 trace_f2fs_writepage(page, NODE);
1406
1407 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1408 goto redirty_out;

--- 56 unchanged lines hidden (view full) ---

1465 /* collect a number of dirty node pages and write together */
1466 if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
1467 goto skip_write;
1468
1469 trace_f2fs_writepages(mapping->host, wbc, NODE);
1470
1471 diff = nr_pages_to_write(sbi, NODE, wbc);
1472 wbc->sync_mode = WB_SYNC_NONE;
1574 .page = page,
1575 .encrypted_page = NULL,
1576 };
1577
1578 trace_f2fs_writepage(page, NODE);
1579
1580 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1581 goto redirty_out;

--- 56 unchanged lines hidden (view full) ---

1638 /* collect a number of dirty node pages and write together */
1639 if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
1640 goto skip_write;
1641
1642 trace_f2fs_writepages(mapping->host, wbc, NODE);
1643
1644 diff = nr_pages_to_write(sbi, NODE, wbc);
1645 wbc->sync_mode = WB_SYNC_NONE;
1473 sync_node_pages(sbi, 0, wbc);
1646 sync_node_pages(sbi, wbc);
1474 wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
1475 return 0;
1476
1477skip_write:
1478 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
1479 trace_f2fs_writepages(mapping->host, wbc, NODE);
1480 return 0;
1481}

--- 37 unchanged lines hidden (view full) ---

1519 radix_tree_delete(&nm_i->free_nid_root, i->nid);
1520}
1521
1522static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
1523{
1524 struct f2fs_nm_info *nm_i = NM_I(sbi);
1525 struct free_nid *i;
1526 struct nat_entry *ne;
1647 wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
1648 return 0;
1649
1650skip_write:
1651 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
1652 trace_f2fs_writepages(mapping->host, wbc, NODE);
1653 return 0;
1654}

--- 37 unchanged lines hidden (view full) ---

1692 radix_tree_delete(&nm_i->free_nid_root, i->nid);
1693}
1694
1695static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
1696{
1697 struct f2fs_nm_info *nm_i = NM_I(sbi);
1698 struct free_nid *i;
1699 struct nat_entry *ne;
1527 bool allocated = false;
1528
1529 if (!available_free_memory(sbi, FREE_NIDS))
1530 return -1;
1531
1532 /* 0 nid should not be used */
1533 if (unlikely(nid == 0))
1534 return 0;
1535
1536 if (build) {
1537 /* do not add allocated nids */
1538 ne = __lookup_nat_cache(nm_i, nid);
1539 if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
1540 nat_get_blkaddr(ne) != NULL_ADDR))
1700
1701 if (!available_free_memory(sbi, FREE_NIDS))
1702 return -1;
1703
1704 /* 0 nid should not be used */
1705 if (unlikely(nid == 0))
1706 return 0;
1707
1708 if (build) {
1709 /* do not add allocated nids */
1710 ne = __lookup_nat_cache(nm_i, nid);
1711 if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
1712 nat_get_blkaddr(ne) != NULL_ADDR))
1541 allocated = true;
1542 if (allocated)
1543 return 0;
1544 }
1545
1546 i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
1547 i->nid = nid;
1548 i->state = NID_NEW;
1549
1550 if (radix_tree_preload(GFP_NOFS)) {

--- 116 unchanged lines hidden (view full) ---

1667 * from second parameter of this function.
1668 * The returned nid could be used ino as well as nid when inode is created.
1669 */
1670bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
1671{
1672 struct f2fs_nm_info *nm_i = NM_I(sbi);
1673 struct free_nid *i = NULL;
1674retry:
1713 return 0;
1714 }
1715
1716 i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
1717 i->nid = nid;
1718 i->state = NID_NEW;
1719
1720 if (radix_tree_preload(GFP_NOFS)) {

--- 116 unchanged lines hidden (view full) ---

1837 * from second parameter of this function.
1838 * The returned nid could be used ino as well as nid when inode is created.
1839 */
1840bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
1841{
1842 struct f2fs_nm_info *nm_i = NM_I(sbi);
1843 struct free_nid *i = NULL;
1844retry:
1845#ifdef CONFIG_F2FS_FAULT_INJECTION
1846 if (time_to_inject(FAULT_ALLOC_NID))
1847 return false;
1848#endif
1675 if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
1676 return false;
1677
1678 spin_lock(&nm_i->free_nid_list_lock);
1679
1680 /* We should not use stale free nids created by build_free_nids */
1681 if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
1682 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));

--- 158 unchanged lines hidden (view full) ---

1841 struct node_info old_ni, new_ni;
1842 struct page *ipage;
1843
1844 get_node_info(sbi, ino, &old_ni);
1845
1846 if (unlikely(old_ni.blk_addr != NULL_ADDR))
1847 return -EINVAL;
1848
1849 if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
1850 return false;
1851
1852 spin_lock(&nm_i->free_nid_list_lock);
1853
1854 /* We should not use stale free nids created by build_free_nids */
1855 if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
1856 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));

--- 158 unchanged lines hidden (view full) ---

2015 struct node_info old_ni, new_ni;
2016 struct page *ipage;
2017
2018 get_node_info(sbi, ino, &old_ni);
2019
2020 if (unlikely(old_ni.blk_addr != NULL_ADDR))
2021 return -EINVAL;
2022
1849 ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
2023 ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
1850 if (!ipage)
1851 return -ENOMEM;
1852
1853 /* Should not use this inode from free nid list */
1854 remove_free_nid(NM_I(sbi), ino);
1855
1856 SetPageUptodate(ipage);
1857 fill_node_footer(ipage, ino, ino, 0, true);

--- 364 unchanged lines hidden ---
2024 if (!ipage)
2025 return -ENOMEM;
2026
2027 /* Should not use this inode from free nid list */
2028 remove_free_nid(NM_I(sbi), ino);
2029
2030 SetPageUptodate(ipage);
2031 fill_node_footer(ipage, ino, ino, 0, true);

--- 364 unchanged lines hidden ---