--- a/fs/f2fs/checkpoint.c (80b304fd00e8b667775ff791121b61ecd7cd0c03)
+++ b/fs/f2fs/checkpoint.c (b5b822050ca3c4fc1f475100cc197cc00ba2d492)
@@ -1 +1 @@
 /*
  * fs/f2fs/checkpoint.c
  *
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as

[--- 146 unchanged lines hidden ---]

@@ -155 +155 @@
         struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 
         trace_f2fs_writepage(page, META);
 
         if (unlikely(sbi->por_doing))
                 goto redirty_out;
         if (wbc->for_reclaim)
                 goto redirty_out;
+        if (unlikely(f2fs_cp_error(sbi)))
+                goto redirty_out;
 
-        /* Should not write any meta pages, if any IO error was occurred */
-        if (unlikely(is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)))
-                goto no_write;
-
         f2fs_wait_on_page_writeback(page, META);
         write_meta_page(sbi, page);
-no_write:
         dec_page_count(sbi, F2FS_DIRTY_META);
         unlock_page(page);
         return 0;
 
 redirty_out:
         redirty_page_for_writepage(wbc, page);
         return AOP_WRITEPAGE_ACTIVATE;
 }
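The new `f2fs_cp_error()` test replaces the open-coded `CP_ERROR_FLAG` check and the `no_write:` label with an early bail-out to `redirty_out`. The helper is defined outside this file; judging from the check it replaces, it is presumably a thin inline wrapper along these lines (a sketch, not the verbatim definition):

```c
/* Sketch of the presumed helper (likely fs/f2fs/f2fs.h): reports whether
 * this filesystem instance has already hit a checkpoint-fatal error,
 * i.e. CP_ERROR_FLAG is set in the live checkpoint block -- exactly the
 * condition the removed lines tested by hand. */
static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
{
        return is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG);
}
```

Note the behavioral change this buys: the old `no_write:` path still decremented `F2FS_DIRTY_META` and unlocked the page, silently dropping the dirty meta page on an errored filesystem; the new code redirties it instead, so the page stays dirty while writeback is refused.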
[--- 164 unchanged lines hidden ---]

@@ -343 +340 @@
 {
         struct ino_entry *e;
         spin_lock(&sbi->ino_lock[mode]);
         e = radix_tree_lookup(&sbi->ino_root[mode], ino);
         spin_unlock(&sbi->ino_lock[mode]);
         return e ? true : false;
 }
 
-static void release_dirty_inode(struct f2fs_sb_info *sbi)
+void release_dirty_inode(struct f2fs_sb_info *sbi)
 {
         struct ino_entry *e, *tmp;
         int i;
 
         for (i = APPEND_INO; i <= UPDATE_INO; i++) {
                 spin_lock(&sbi->ino_lock[i]);
                 list_for_each_entry_safe(e, tmp, &sbi->ino_list[i], list) {
                         list_del(&e->list);

[--- 81 unchanged lines hidden ---]
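Dropping `static` exports `release_dirty_inode()` beyond checkpoint.c, which implies a companion declaration in a shared header that this diff does not show. Presumably something like:

```c
/* Presumed companion change in fs/f2fs/f2fs.h (outside this diff):
 * checkpoint.c now exposes this to other f2fs compilation units. */
void release_dirty_inode(struct f2fs_sb_info *sbi);
```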
@@ -441 +438 @@ write_orphan_inodes()
 }
 
 static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
 {
         struct list_head *head;
         struct f2fs_orphan_block *orphan_blk = NULL;
         unsigned int nentries = 0;
         unsigned short index;
-        unsigned short orphan_blocks = (unsigned short)((sbi->n_orphans +
-                        (F2FS_ORPHANS_PER_BLOCK - 1)) / F2FS_ORPHANS_PER_BLOCK);
+        unsigned short orphan_blocks =
+                        (unsigned short)GET_ORPHAN_BLOCKS(sbi->n_orphans);
         struct page *page = NULL;
         struct ino_entry *orphan = NULL;
 
         for (index = 0; index < orphan_blocks; index++)
                 grab_meta_page(sbi, start_blk + index);
 
         index = 1;
         spin_lock(&sbi->ino_lock[ORPHAN_INO]);

[--- 273 unchanged lines hidden ---]
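`GET_ORPHAN_BLOCKS()` is new and defined elsewhere; since it replaces the same ceiling division both here and in the checkpoint-writing code below, it is presumably the usual round-up macro:

```c
/* Sketch of the presumed macro (likely fs/f2fs/f2fs.h): how many orphan
 * blocks are needed to store n orphan inode numbers, rounded up. This is
 * exactly the expression the removed lines computed inline. */
#define GET_ORPHAN_BLOCKS(n)                                            \
        (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / F2FS_ORPHANS_PER_BLOCK)
```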
@@ -732 +729 @@ block_operations()
                         f2fs_submit_merged_bio(sbi, DATA, WRITE);
                 }
                 goto retry;
 }
 
 /*
  * Freeze all the FS-operations for checkpoint.
  */
-static void block_operations(struct f2fs_sb_info *sbi)
+static int block_operations(struct f2fs_sb_info *sbi)
 {
         struct writeback_control wbc = {
                 .sync_mode = WB_SYNC_ALL,
                 .nr_to_write = LONG_MAX,
                 .for_reclaim = 0,
         };
         struct blk_plug plug;
+        int err = 0;
 
         blk_start_plug(&plug);
 
 retry_flush_dents:
         f2fs_lock_all(sbi);
         /* write all the dirty dentry pages */
         if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
                 f2fs_unlock_all(sbi);
                 sync_dirty_dir_inodes(sbi);
+                if (unlikely(f2fs_cp_error(sbi))) {
+                        err = -EIO;
+                        goto out;
+                }
                 goto retry_flush_dents;
         }
 
         /*
-         * POR: we should ensure that there is no dirty node pages
+         * POR: we should ensure that there are no dirty node pages
          * until finishing nat/sit flush.
          */
 retry_flush_nodes:
         down_write(&sbi->node_write);
 
         if (get_pages(sbi, F2FS_DIRTY_NODES)) {
                 up_write(&sbi->node_write);
                 sync_node_pages(sbi, 0, &wbc);
+                if (unlikely(f2fs_cp_error(sbi))) {
+                        f2fs_unlock_all(sbi);
+                        err = -EIO;
+                        goto out;
+                }
                 goto retry_flush_nodes;
         }
+out:
         blk_finish_plug(&plug);
+        return err;
 }
 
 static void unblock_operations(struct f2fs_sb_info *sbi)
 {
         up_write(&sbi->node_write);
         f2fs_unlock_all(sbi);
 }

[--- 27 unchanged lines hidden ---]
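Now that `block_operations()` returns an error, its locking contract is asymmetric and worth spelling out: on success it returns 0 with `f2fs_lock_all()` and the `node_write` semaphore still held (released later by `unblock_operations()`), whereas both `-EIO` paths drop every lock before jumping to `out`. A caller must therefore skip `unblock_operations()` on failure, which is the shape the reworked `write_checkpoint()` below takes:

```c
/* Illustrative caller only, mirroring the reworked write_checkpoint()
 * further down in this file; not a new function proposed for the code. */
static void checkpoint_caller_sketch(struct f2fs_sb_info *sbi, bool is_umount)
{
        mutex_lock(&sbi->cp_mutex);
        if (block_operations(sbi))
                goto out;       /* -EIO: block_operations dropped its locks */

        do_checkpoint(sbi, is_umount); /* runs under f2fs_lock_all()/node_write */

        unblock_operations(sbi);        /* success path only */
out:
        mutex_unlock(&sbi->cp_mutex);
}
```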
@@ -808 +817 @@
 
         /*
          * This avoids to conduct wrong roll-forward operations and uses
          * metapages, so should be called prior to sync_meta_pages below.
          */
         discard_next_dnode(sbi, NEXT_FREE_BLKADDR(sbi, curseg));
 
         /* Flush all the NAT/SIT pages */
-        while (get_pages(sbi, F2FS_DIRTY_META))
+        while (get_pages(sbi, F2FS_DIRTY_META)) {
                 sync_meta_pages(sbi, META, LONG_MAX);
+                if (unlikely(f2fs_cp_error(sbi)))
+                        return;
+        }
 
         next_free_nid(sbi, &last_nid);
 
         /*
          * modify checkpoint
          * version number is already updated
          */
         ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
         ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
         ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
-        for (i = 0; i < 3; i++) {
+        for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
                 ckpt->cur_node_segno[i] =
                         cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
                 ckpt->cur_node_blkoff[i] =
                         cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
                 ckpt->alloc_type[i + CURSEG_HOT_NODE] =
                         curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
         }
-        for (i = 0; i < 3; i++) {
+        for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
                 ckpt->cur_data_segno[i] =
                         cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
                 ckpt->cur_data_blkoff[i] =
                         cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
                 ckpt->alloc_type[i + CURSEG_HOT_DATA] =
                         curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
         }
 
         ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
         ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
         ckpt->next_free_nid = cpu_to_le32(last_nid);
 
         /* 2 cp + n data seg summary + orphan inode blocks */
         data_sum_blocks = npages_for_summary_flush(sbi);
-        if (data_sum_blocks < 3)
+        if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
                 set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
         else
                 clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
 
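The magic `3`s give way to named constants defined outside this file. Given the six `CURSEG_HOT/WARM/COLD_{NODE,DATA}` logs the loops above walk, the presumed definitions are:

```c
/* Presumed definitions (fs/f2fs/segment.h in this era): f2fs maintains
 * three active data logs (hot/warm/cold) and three active node logs, so
 * each loop above covers one group of three and NR_CURSEG_TYPE is six. */
#define NR_CURSEG_DATA_TYPE     (3)
#define NR_CURSEG_NODE_TYPE     (3)
#define NR_CURSEG_TYPE          (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
```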
@@ -856 +868 @@
-        orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1)
-                        / F2FS_ORPHANS_PER_BLOCK;
+        orphan_blocks = GET_ORPHAN_BLOCKS(sbi->n_orphans);
         ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
                         orphan_blocks);
 
         if (is_umount) {
                 set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
-                ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
+                ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
                         cp_payload_blks + data_sum_blocks +
                         orphan_blocks + NR_CURSEG_NODE_TYPE);
         } else {
                 clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
-                ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
+                ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
                         cp_payload_blks + data_sum_blocks +
                         orphan_blocks);
         }
 
         if (sbi->n_orphans)
                 set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
         else
                 clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

[--- 42 unchanged lines hidden ---]
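Both literal `2`s named the same quantity: the two copies of the checkpoint block that bracket a checkpoint pack. The new constant is presumably just that, and with it the pack-size arithmetic above reads as a layout description:

```c
/* Presumed definition (a shared f2fs header): the checkpoint blocks per
 * pack -- one header copy plus one footer copy used to validate the pack. */
#define F2FS_CP_PACKS   2

/*
 * cp_pack_total_block_count, as computed above:
 *
 *   F2FS_CP_PACKS           header + footer checkpoint blocks
 * + cp_payload_blks         checkpoint payload blocks, if any
 * + orphan_blocks           GET_ORPHAN_BLOCKS(sbi->n_orphans)
 * + data_sum_blocks         data summaries (compact form when < 3 blocks)
 * + NR_CURSEG_NODE_TYPE     node summaries, written only on umount
 */
```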
@@ -919 +930 @@
         kaddr = page_address(cp_page);
         memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
         set_page_dirty(cp_page);
         f2fs_put_page(cp_page, 1);
 
         /* wait for previous submitted node/meta pages writeback */
         wait_on_all_pages_writeback(sbi);
 
+        if (unlikely(f2fs_cp_error(sbi)))
+                return;
+
         filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LONG_MAX);
         filemap_fdatawait_range(META_MAPPING(sbi), 0, LONG_MAX);
 
         /* update user_block_counts */
         sbi->last_valid_block_count = sbi->total_valid_block_count;
         sbi->alloc_valid_block_count = 0;
 
         /* Here, we only have one bio having CP pack */
         sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
 
-        if (!is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) {
-                clear_prefree_segments(sbi);
-                release_dirty_inode(sbi);
-                F2FS_RESET_SB_DIRT(sbi);
-        }
+        release_dirty_inode(sbi);
+
+        if (unlikely(f2fs_cp_error(sbi)))
+                return;
+
+        clear_prefree_segments(sbi);
+        F2FS_RESET_SB_DIRT(sbi);
 }
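`F2FS_RESET_SB_DIRT()` just above and the new `if (!sbi->s_dirty)` test in `write_checkpoint()` below operate on the same flag: a checkpoint now runs only when the superblock is actually dirty, and only a fully successful checkpoint clears the flag. The helpers are defined elsewhere; presumably along these lines:

```c
/* Presumed helpers (fs/f2fs/f2fs.h of this vintage), paired with the
 * sbi->s_dirty test that write_checkpoint() adds below. */
#define F2FS_SET_SB_DIRT(sbi)   ((sbi)->s_dirty = 1)
#define F2FS_RESET_SB_DIRT(sbi) ((sbi)->s_dirty = 0)
```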
@@ -944 +960 @@ write_checkpoint()
 /*
- * We guarantee that this checkpoint procedure should not fail.
+ * We guarantee that this checkpoint procedure will not fail.
  */
 void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 {
         struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
         unsigned long long ckpt_ver;
 
         trace_f2fs_write_checkpoint(sbi->sb, is_umount, "start block_ops");
 
         mutex_lock(&sbi->cp_mutex);
-        block_operations(sbi);
 
+        if (!sbi->s_dirty)
+                goto out;
+        if (unlikely(f2fs_cp_error(sbi)))
+                goto out;
+        if (block_operations(sbi))
+                goto out;
+
         trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops");
 
         f2fs_submit_merged_bio(sbi, DATA, WRITE);
         f2fs_submit_merged_bio(sbi, NODE, WRITE);
         f2fs_submit_merged_bio(sbi, META, WRITE);
 
         /*
          * update checkpoint pack index

[--- 6 unchanged lines hidden ---]

@@ -971 +993 @@ write_checkpoint()
         /* write cached NAT/SIT entries to NAT/SIT area */
         flush_nat_entries(sbi);
         flush_sit_entries(sbi);
 
         /* unlock all the fs_lock[] in do_checkpoint() */
         do_checkpoint(sbi, is_umount);
 
         unblock_operations(sbi);
-        mutex_unlock(&sbi->cp_mutex);
-
         stat_inc_cp_count(sbi->stat_info);
+out:
+        mutex_unlock(&sbi->cp_mutex);
         trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint");
 }
 
 void init_ino_entry_info(struct f2fs_sb_info *sbi)
 {
         int i;
 
         for (i = 0; i < MAX_INO_ENTRY; i++) {

[--- 4 unchanged lines hidden ---]

@@ -994 +1016 @@ init_ino_entry_info()
 
         /*
          * considering 512 blocks in a segment 8 blocks are needed for cp
          * and log segment summaries. Remaining blocks are used to keep
          * orphan entries with the limitation one reserved segment
          * for cp pack we can have max 1020*504 orphan entries
          */
         sbi->n_orphans = 0;
-        sbi->max_orphans = (sbi->blocks_per_seg - 2 - NR_CURSEG_TYPE)
-                        * F2FS_ORPHANS_PER_BLOCK;
+        sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
+                        NR_CURSEG_TYPE) * F2FS_ORPHANS_PER_BLOCK;
 }
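With the presumed constants above, the arithmetic in the comment checks out for the default 512-block segment: 512 - F2FS_CP_PACKS (2) - NR_CURSEG_TYPE (6) leaves 504 blocks for orphan entries, and 504 * F2FS_ORPHANS_PER_BLOCK (1020) = 514,080, which is exactly the "max 1020*504 orphan entries" the comment cites.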
@@ -1006 +1028 @@ create_checkpoint_caches()
 int __init create_checkpoint_caches(void)
 {
         ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
                         sizeof(struct ino_entry));
         if (!ino_entry_slab)
                 return -ENOMEM;

[--- 14 unchanged lines hidden ---]