checkpoint.c (8c57a5e7b2820f349c95b8c8393fec1e0f4070d2) | checkpoint.c (70246286e94c335b5bea0cbc68a17a96dd620281) |
---|---|
1/* 2 * fs/f2fs/checkpoint.c 3 * 4 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 5 * http://www.samsung.com/ 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as --- 12 unchanged lines hidden (view full) --- 21#include "node.h" 22#include "segment.h" 23#include "trace.h" 24#include <trace/events/f2fs.h> 25 26static struct kmem_cache *ino_entry_slab; 27struct kmem_cache *inode_entry_slab; 28 | 1/* 2 * fs/f2fs/checkpoint.c 3 * 4 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 5 * http://www.samsung.com/ 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as --- 12 unchanged lines hidden (view full) --- 21#include "node.h" 22#include "segment.h" 23#include "trace.h" 24#include <trace/events/f2fs.h> 25 26static struct kmem_cache *ino_entry_slab; 27struct kmem_cache *inode_entry_slab; 28 |
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io) { set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG); sbi->sb->s_flags |= MS_RDONLY; if (!end_io) f2fs_flush_merged_bios(sbi); } |
|
29/* 30 * We guarantee no failure on the returned page. 31 */ 32struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) 33{ 34 struct address_space *mapping = META_MAPPING(sbi); 35 struct page *page = NULL; 36repeat: | 37/* 38 * We guarantee no failure on the returned page. 39 */ 40struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) 41{ 42 struct address_space *mapping = META_MAPPING(sbi); 43 struct page *page = NULL; 44repeat: |
page = grab_cache_page(mapping, index); | page = f2fs_grab_cache_page(mapping, index, false); |
38 if (!page) { 39 cond_resched(); 40 goto repeat; 41 } 42 f2fs_wait_on_page_writeback(page, META, true); 43 SetPageUptodate(page); 44 return page; 45} --- 4 unchanged lines hidden (view full) --- 50static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index, 51 bool is_meta) 52{ 53 struct address_space *mapping = META_MAPPING(sbi); 54 struct page *page; 55 struct f2fs_io_info fio = { 56 .sbi = sbi, 57 .type = META, | 46 if (!page) { 47 cond_resched(); 48 goto repeat; 49 } 50 f2fs_wait_on_page_writeback(page, META, true); 51 SetPageUptodate(page); 52 return page; 53} --- 4 unchanged lines hidden (view full) --- 58static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index, 59 bool is_meta) 60{ 61 struct address_space *mapping = META_MAPPING(sbi); 62 struct page *page; 63 struct f2fs_io_info fio = { 64 .sbi = sbi, 65 .type = META, |
.rw = READ_SYNC | REQ_META | REQ_PRIO, | .op = REQ_OP_READ, .op_flags = READ_SYNC | REQ_META | REQ_PRIO, |
59 .old_blkaddr = index, 60 .new_blkaddr = index, 61 .encrypted_page = NULL, 62 }; 63 64 if (unlikely(!is_meta)) | 68 .old_blkaddr = index, 69 .new_blkaddr = index, 70 .encrypted_page = NULL, 71 }; 72 73 if (unlikely(!is_meta)) |
fio.rw &= ~REQ_META; | fio.op_flags &= ~REQ_META; |
66repeat: | 75repeat: |
page = grab_cache_page(mapping, index); | page = f2fs_grab_cache_page(mapping, index, false); |
68 if (!page) { 69 cond_resched(); 70 goto repeat; 71 } 72 if (PageUptodate(page)) 73 goto out; 74 75 fio.page = page; --- 10 unchanged lines hidden (view full) --- 86 } 87 88 /* 89 * if there is any IO error when accessing device, make our filesystem 90 * readonly and make sure do not write checkpoint with non-uptodate 91 * meta page. 92 */ 93 if (unlikely(!PageUptodate(page))) | 77 if (!page) { 78 cond_resched(); 79 goto repeat; 80 } 81 if (PageUptodate(page)) 82 goto out; 83 84 fio.page = page; --- 10 unchanged lines hidden (view full) --- 95 } 96 97 /* 98 * if there is any IO error when accessing device, make our filesystem 99 * readonly and make sure do not write checkpoint with non-uptodate 100 * meta page. 101 */ 102 if (unlikely(!PageUptodate(page))) |
f2fs_stop_checkpoint(sbi); | f2fs_stop_checkpoint(sbi, false); |
95out: 96 return page; 97} 98 99struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) 100{ 101 return __get_meta_page(sbi, index, true); 102} --- 41 unchanged lines hidden (view full) --- 144int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, 145 int type, bool sync) 146{ 147 struct page *page; 148 block_t blkno = start; 149 struct f2fs_io_info fio = { 150 .sbi = sbi, 151 .type = META, | 104out: 105 return page; 106} 107 108struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) 109{ 110 return __get_meta_page(sbi, index, true); 111} --- 41 unchanged lines hidden (view full) --- 153int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, 154 int type, bool sync) 155{ 156 struct page *page; 157 block_t blkno = start; 158 struct f2fs_io_info fio = { 159 .sbi = sbi, 160 .type = META, |
.rw = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA, | .op = REQ_OP_READ, .op_flags = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : REQ_RAHEAD, |
153 .encrypted_page = NULL, 154 }; 155 struct blk_plug plug; 156 157 if (unlikely(type == META_POR)) | 163 .encrypted_page = NULL, 164 }; 165 struct blk_plug plug; 166 167 if (unlikely(type == META_POR)) |
fio.rw &= ~REQ_META; | fio.op_flags &= ~REQ_META; |
159 160 blk_start_plug(&plug); 161 for (; nrpages-- > 0; blkno++) { 162 163 if (!is_valid_blkaddr(sbi, blkno, type)) 164 goto out; 165 166 switch (type) { --- 14 unchanged lines hidden (view full) --- 181 case META_CP: 182 case META_POR: 183 fio.new_blkaddr = blkno; 184 break; 185 default: 186 BUG(); 187 } 188 | 169 170 blk_start_plug(&plug); 171 for (; nrpages-- > 0; blkno++) { 172 173 if (!is_valid_blkaddr(sbi, blkno, type)) 174 goto out; 175 176 switch (type) { --- 14 unchanged lines hidden (view full) --- 191 case META_CP: 192 case META_POR: 193 fio.new_blkaddr = blkno; 194 break; 195 default: 196 BUG(); 197 } 198 |
page = grab_cache_page(META_MAPPING(sbi), fio.new_blkaddr); | page = f2fs_grab_cache_page(META_MAPPING(sbi), fio.new_blkaddr, false); |
190 if (!page) 191 continue; 192 if (PageUptodate(page)) { 193 f2fs_put_page(page, 1); 194 continue; 195 } 196 197 fio.page = page; --- 8 unchanged lines hidden (view full) --- 206} 207 208void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index) 209{ 210 struct page *page; 211 bool readahead = false; 212 213 page = find_get_page(META_MAPPING(sbi), index); | 201 if (!page) 202 continue; 203 if (PageUptodate(page)) { 204 f2fs_put_page(page, 1); 205 continue; 206 } 207 208 fio.page = page; --- 8 unchanged lines hidden (view full) --- 217} 218 219void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index) 220{ 221 struct page *page; 222 bool readahead = false; 223 224 page = find_get_page(META_MAPPING(sbi), index); |
if (!page || (page && !PageUptodate(page))) | if (!page || !PageUptodate(page)) |
215 readahead = true; 216 f2fs_put_page(page, 0); 217 218 if (readahead) 219 ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR, true); 220} 221 222static int f2fs_write_meta_page(struct page *page, --- 220 unchanged lines hidden (view full) --- 443 struct ino_entry *e; 444 445 spin_lock(&im->ino_lock); 446 e = radix_tree_lookup(&im->ino_root, ino); 447 spin_unlock(&im->ino_lock); 448 return e ? true : false; 449} 450 | 226 readahead = true; 227 f2fs_put_page(page, 0); 228 229 if (readahead) 230 ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR, true); 231} 232 233static int f2fs_write_meta_page(struct page *page, --- 220 unchanged lines hidden (view full) --- 454 struct ino_entry *e; 455 456 spin_lock(&im->ino_lock); 457 e = radix_tree_lookup(&im->ino_root, ino); 458 spin_unlock(&im->ino_lock); 459 return e ? true : false; 460} 461 |
void release_ino_entry(struct f2fs_sb_info *sbi) | void release_ino_entry(struct f2fs_sb_info *sbi, bool all) |
452{ 453 struct ino_entry *e, *tmp; 454 int i; 455 | 463{ 464 struct ino_entry *e, *tmp; 465 int i; 466 |
for (i = APPEND_INO; i <= UPDATE_INO; i++) { | for (i = all ? ORPHAN_INO: APPEND_INO; i <= UPDATE_INO; i++) {
457 struct inode_management *im = &sbi->im[i]; 458 459 spin_lock(&im->ino_lock); 460 list_for_each_entry_safe(e, tmp, &im->ino_list, list) { 461 list_del(&e->list); 462 radix_tree_delete(&im->ino_root, e->ino); 463 kmem_cache_free(ino_entry_slab, e); 464 im->ino_num--; 465 } 466 spin_unlock(&im->ino_lock); 467 } 468} 469 470int acquire_orphan_inode(struct f2fs_sb_info *sbi) 471{ 472 struct inode_management *im = &sbi->im[ORPHAN_INO]; 473 int err = 0; 474 475 spin_lock(&im->ino_lock); | 468 struct inode_management *im = &sbi->im[i]; 469 470 spin_lock(&im->ino_lock); 471 list_for_each_entry_safe(e, tmp, &im->ino_list, list) { 472 list_del(&e->list); 473 radix_tree_delete(&im->ino_root, e->ino); 474 kmem_cache_free(ino_entry_slab, e); 475 im->ino_num--; 476 } 477 spin_unlock(&im->ino_lock); 478 } 479} 480 481int acquire_orphan_inode(struct f2fs_sb_info *sbi) 482{ 483 struct inode_management *im = &sbi->im[ORPHAN_INO]; 484 int err = 0; 485 486 spin_lock(&im->ino_lock); |
#ifdef CONFIG_F2FS_FAULT_INJECTION if (time_to_inject(FAULT_ORPHAN)) { spin_unlock(&im->ino_lock); return -ENOSPC; } #endif |
|
476 if (unlikely(im->ino_num >= sbi->max_orphans)) 477 err = -ENOSPC; 478 else 479 im->ino_num++; 480 spin_unlock(&im->ino_lock); 481 482 return err; 483} --- 288 unchanged lines hidden (view full) --- 772{ 773 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 774 enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE; 775 776 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && 777 !S_ISLNK(inode->i_mode)) 778 return; 779 | 494 if (unlikely(im->ino_num >= sbi->max_orphans)) 495 err = -ENOSPC; 496 else 497 im->ino_num++; 498 spin_unlock(&im->ino_lock); 499 500 return err; 501} --- 288 unchanged lines hidden (view full) --- 790{ 791 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 792 enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE; 793 794 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && 795 !S_ISLNK(inode->i_mode)) 796 return; 797 |
spin_lock(&sbi->inode_lock[type]); __add_dirty_inode(inode, type); inode_inc_dirty_pages(inode); spin_unlock(&sbi->inode_lock[type]); | if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH)) { spin_lock(&sbi->inode_lock[type]); __add_dirty_inode(inode, type); spin_unlock(&sbi->inode_lock[type]); } |
784 | 803 |
inode_inc_dirty_pages(inode);
|
785 SetPagePrivate(page); 786 f2fs_trace_pid(page); 787} 788 | 805 SetPagePrivate(page); 806 f2fs_trace_pid(page); 807} 808 |
void add_dirty_dir_inode(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); spin_lock(&sbi->inode_lock[DIR_INODE]); __add_dirty_inode(inode, DIR_INODE); spin_unlock(&sbi->inode_lock[DIR_INODE]); } |
798void remove_dirty_inode(struct inode *inode) 799{ 800 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | 809void remove_dirty_inode(struct inode *inode) 810{ 811 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
struct f2fs_inode_info *fi = F2FS_I(inode); |
802 enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE; 803 804 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && 805 !S_ISLNK(inode->i_mode)) 806 return; 807 | 812 enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE; 813 814 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && 815 !S_ISLNK(inode->i_mode)) 816 return; 817 |
if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH)) return; |
|
808 spin_lock(&sbi->inode_lock[type]); 809 __remove_dirty_inode(inode, type); 810 spin_unlock(&sbi->inode_lock[type]); | 821 spin_lock(&sbi->inode_lock[type]); 822 __remove_dirty_inode(inode, type); 823 spin_unlock(&sbi->inode_lock[type]); |
/* Only from the recovery routine */ if (is_inode_flag_set(fi, FI_DELAY_IPUT)) { clear_inode_flag(fi, FI_DELAY_IPUT); iput(inode); } |
817} 818 819int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type) 820{ 821 struct list_head *head; 822 struct inode *inode; 823 struct f2fs_inode_info *fi; 824 bool is_dir = (type == DIR_INODE); --- 62 unchanged lines hidden (view full) --- 887 * POR: we should ensure that there are no dirty node pages 888 * until finishing nat/sit flush. 889 */ 890retry_flush_nodes: 891 down_write(&sbi->node_write); 892 893 if (get_pages(sbi, F2FS_DIRTY_NODES)) { 894 up_write(&sbi->node_write); | 824} 825 826int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type) 827{ 828 struct list_head *head; 829 struct inode *inode; 830 struct f2fs_inode_info *fi; 831 bool is_dir = (type == DIR_INODE); --- 62 unchanged lines hidden (view full) --- 894 * POR: we should ensure that there are no dirty node pages 895 * until finishing nat/sit flush. 896 */ 897retry_flush_nodes: 898 down_write(&sbi->node_write); 899 900 if (get_pages(sbi, F2FS_DIRTY_NODES)) { 901 up_write(&sbi->node_write); |
err = sync_node_pages(sbi, 0, &wbc); | err = sync_node_pages(sbi, &wbc); |
896 if (err) { 897 f2fs_unlock_all(sbi); 898 goto out; 899 } 900 goto retry_flush_nodes; 901 } 902out: 903 blk_finish_plug(&plug); --- 8 unchanged lines hidden (view full) --- 912 913static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi) 914{ 915 DEFINE_WAIT(wait); 916 917 for (;;) { 918 prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE); 919 | 903 if (err) { 904 f2fs_unlock_all(sbi); 905 goto out; 906 } 907 goto retry_flush_nodes; 908 } 909out: 910 blk_finish_plug(&plug); --- 8 unchanged lines hidden (view full) --- 919 920static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi) 921{ 922 DEFINE_WAIT(wait); 923 924 for (;;) { 925 prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE); 926 |
if (!get_pages(sbi, F2FS_WRITEBACK)) | if (!atomic_read(&sbi->nr_wb_bios)) |
921 break; 922 923 io_schedule_timeout(5*HZ); 924 } 925 finish_wait(&sbi->cp_wait, &wait); 926} 927 928static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) --- 148 unchanged lines hidden (view full) --- 1077 if (unlikely(f2fs_cp_error(sbi))) 1078 return -EIO; 1079 1080 filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LLONG_MAX); 1081 filemap_fdatawait_range(META_MAPPING(sbi), 0, LLONG_MAX); 1082 1083 /* update user_block_counts */ 1084 sbi->last_valid_block_count = sbi->total_valid_block_count; | 928 break; 929 930 io_schedule_timeout(5*HZ); 931 } 932 finish_wait(&sbi->cp_wait, &wait); 933} 934 935static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) --- 148 unchanged lines hidden (view full) --- 1084 if (unlikely(f2fs_cp_error(sbi))) 1085 return -EIO; 1086 1087 filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LLONG_MAX); 1088 filemap_fdatawait_range(META_MAPPING(sbi), 0, LLONG_MAX); 1089 1090 /* update user_block_counts */ 1091 sbi->last_valid_block_count = sbi->total_valid_block_count; |
sbi->alloc_valid_block_count = 0; | percpu_counter_set(&sbi->alloc_valid_block_count, 0); |
1086 1087 /* Here, we only have one bio having CP pack */ 1088 sync_meta_pages(sbi, META_FLUSH, LONG_MAX); 1089 1090 /* wait for previous submitted meta pages writeback */ 1091 wait_on_all_pages_writeback(sbi); 1092 1093 /* 1094 * invalidate meta page which is used temporarily for zeroing out 1095 * block at the end of warm node chain. 1096 */ 1097 if (invalidate) 1098 invalidate_mapping_pages(META_MAPPING(sbi), discard_blk, 1099 discard_blk); 1100 | 1093 1094 /* Here, we only have one bio having CP pack */ 1095 sync_meta_pages(sbi, META_FLUSH, LONG_MAX); 1096 1097 /* wait for previous submitted meta pages writeback */ 1098 wait_on_all_pages_writeback(sbi); 1099 1100 /* 1101 * invalidate meta page which is used temporarily for zeroing out 1102 * block at the end of warm node chain. 1103 */ 1104 if (invalidate) 1105 invalidate_mapping_pages(META_MAPPING(sbi), discard_blk, 1106 discard_blk); 1107 |
release_ino_entry(sbi); | release_ino_entry(sbi, false); |
1102 1103 if (unlikely(f2fs_cp_error(sbi))) 1104 return -EIO; 1105 1106 clear_prefree_segments(sbi, cpc); 1107 clear_sbi_flag(sbi, SBI_IS_DIRTY); 1108 1109 return 0; --- 104 unchanged lines hidden --- | 1109 1110 if (unlikely(f2fs_cp_error(sbi))) 1111 return -EIO; 1112 1113 clear_prefree_segments(sbi, cpc); 1114 clear_sbi_flag(sbi, SBI_IS_DIRTY); 1115 1116 return 0; --- 104 unchanged lines hidden --- |
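The first hunk in the right-hand column adds f2fs_stop_checkpoint() to checkpoint.c with an end_io flag: the checkpoint is marked errored and the superblock goes read-only in both cases, but merged bios are only flushed when end_io is false. The callers that pass true are not shown in this section, so reading the flag as "skip issuing more I/O when reached from a completion path" is an inference. A minimal standalone sketch of that guard, using stand-in types rather than the real f2fs structures:

```c
#include <stdbool.h>
#include <stdio.h>

struct sb_info {
    bool cp_error;      /* stand-in for CP_ERROR_FLAG in the checkpoint */
    bool read_only;     /* stand-in for MS_RDONLY on the superblock */
};

static void flush_merged_bios(struct sb_info *sbi)
{
    /* In the real code this submits any bios still sitting in the
     * per-type merge buffers; here it is just a placeholder. */
    (void)sbi;
    printf("flushing pending writes\n");
}

/* Same shape as the new f2fs_stop_checkpoint(): always record the error
 * and go read-only, but only kick off more I/O when not called from an
 * I/O-completion path. */
static void stop_checkpoint(struct sb_info *sbi, bool end_io)
{
    sbi->cp_error = true;
    sbi->read_only = true;
    if (!end_io)
        flush_merged_bios(sbi);
}

int main(void)
{
    struct sb_info sbi = { 0 };
    stop_checkpoint(&sbi, false);   /* e.g. after a failed metadata read */
    return 0;
}
```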
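In __get_meta_page() and ra_meta_pages(), the single .rw field of struct f2fs_io_info is split into .op (the operation, e.g. REQ_OP_READ) and .op_flags (modifiers such as REQ_META and REQ_PRIO), and READA becomes REQ_RAHEAD. The standalone sketch below only illustrates the encoding change; the flag values are mocks, not the kernel's real REQ_* definitions:

```c
#include <stdio.h>

/* Mock values -- not the kernel's real constants. */
enum req_op { REQ_OP_READ = 0, REQ_OP_WRITE = 1 };
#define REQ_META  (1u << 0)
#define REQ_PRIO  (1u << 1)
#define READ_SYNC (1u << 2)

/* Old style: one field mixes the operation with its modifier flags. */
struct io_info_old { unsigned int rw; };

/* New style: the operation and the modifier flags travel separately. */
struct io_info_new { int op; unsigned int op_flags; };

int main(void)
{
    struct io_info_old old_fio = { .rw = READ_SYNC | REQ_META | REQ_PRIO };
    struct io_info_new new_fio = {
        .op = REQ_OP_READ,
        .op_flags = READ_SYNC | REQ_META | REQ_PRIO,
    };

    /* Clearing a modifier only touches the flags word; the operation
     * now lives in its own field and cannot be disturbed by accident. */
    old_fio.rw &= ~REQ_META;
    new_fio.op_flags &= ~REQ_META;

    printf("old rw=0x%x  new op=%d op_flags=0x%x\n",
           old_fio.rw, new_fio.op, new_fio.op_flags);
    return 0;
}
```

Keeping the operation in its own field means dropping a flag like REQ_META can no longer clash with the bits that say whether the request is a read or a write.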
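Several call sites swap grab_cache_page() for f2fs_grab_cache_page(mapping, index, false), and acquire_orphan_inode() gains a time_to_inject(FAULT_ORPHAN) check under CONFIG_F2FS_FAULT_INJECTION. The third argument's meaning is not visible in these hunks; assuming it gates an optional injected allocation failure, the overall pattern looks roughly like the standalone sketch below (the countdown logic, the FAULT_* names and the wrapper are illustrative, not the real f2fs code):

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative fault types; the real enum is larger and lives in f2fs.h. */
enum fault_type { FAULT_PAGE_ALLOC, FAULT_ORPHAN, FAULT_MAX };

/* One countdown per fault type: fire once every 'interval' calls.
 * This mirrors the idea of time_to_inject(), not its actual code. */
static unsigned int inject_interval[FAULT_MAX] = { 0, 4 };
static unsigned int inject_count[FAULT_MAX];

static bool time_to_inject(enum fault_type type)
{
    if (!inject_interval[type])
        return false;                 /* injection disabled for this type */
    if (++inject_count[type] < inject_interval[type])
        return false;
    inject_count[type] = 0;
    return true;                      /* simulate a failure here */
}

/* Wrapper in the spirit of f2fs_grab_cache_page(): same call sites as the
 * plain helper, plus an opt-in failure path behind the extra argument. */
static void *grab_page(size_t index, bool allow_injection)
{
    (void)index;
    if (allow_injection && time_to_inject(FAULT_PAGE_ALLOC))
        return NULL;
    return malloc(4096);              /* stand-in for a page-cache lookup */
}

int main(void)
{
    for (int i = 0; i < 8; i++)
        printf("orphan slot attempt %d: %s\n", i,
               time_to_inject(FAULT_ORPHAN) ? "-ENOSPC (injected)" : "ok");
    free(grab_page(0, false));        /* meta pages opt out, as in the diff */
    return 0;
}
```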
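release_ino_entry() now takes a bool all, and do_checkpoint() passes false; the loop start becomes all ? ORPHAN_INO : APPEND_INO, widening the range to include the orphan list only when all is set. That relies on ORPHAN_INO preceding APPEND_INO and UPDATE_INO in the ino-type enum, an ordering implied by the loop bound but not shown in this section. A small standalone sketch of the same widen-the-range idiom:

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative ordering; the real enum lives in f2fs.h. */
enum ino_type { ORPHAN_INO, APPEND_INO, UPDATE_INO, MAX_INO_ENTRY };

static const char *names[MAX_INO_ENTRY] = { "orphan", "append", "update" };

/* Mirrors the new release_ino_entry() loop: 'all' widens the range to
 * include the orphan list, otherwise only append/update are drained. */
static void release_lists(bool all)
{
    for (int i = all ? ORPHAN_INO : APPEND_INO; i <= UPDATE_INO; i++)
        printf("draining %s list\n", names[i]);
}

int main(void)
{
    puts("all = false (the checkpoint path shown in the diff):");
    release_lists(false);
    puts("all = true (caller not shown in this section):");
    release_lists(true);
    return 0;
}
```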
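In do_checkpoint(), sbi->alloc_valid_block_count = 0 becomes percpu_counter_set(&sbi->alloc_valid_block_count, 0), i.e. the field is now a per-CPU counter so hot-path increments avoid contending on one shared variable. The real work is done by the kernel's percpu_counter API; the toy sketch below only illustrates the idea, with plain arrays standing in for per-CPU data:

```c
#include <stdio.h>

#define NR_CPUS 4

/* Toy per-CPU counter: each CPU updates its own slot, so increments on
 * the hot path never contend on a shared cacheline. */
struct pcpu_counter {
    long slot[NR_CPUS];
};

static void pcpu_add(struct pcpu_counter *c, int cpu, long v)
{
    c->slot[cpu] += v;                /* local, uncontended update */
}

static long pcpu_sum(const struct pcpu_counter *c)
{
    long sum = 0;                     /* slow path: fold all slots */
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        sum += c->slot[cpu];
    return sum;
}

static void pcpu_set(struct pcpu_counter *c, long v)
{
    /* Reset to an exact value: wipe every slot, then seed one
     * (roughly what percpu_counter_set() does). */
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        c->slot[cpu] = 0;
    c->slot[0] = v;
}

int main(void)
{
    struct pcpu_counter alloc_valid_block_count = { { 0 } };

    pcpu_add(&alloc_valid_block_count, 1, 16);  /* blocks allocated on CPU 1 */
    pcpu_add(&alloc_valid_block_count, 3, 8);   /* ...and on CPU 3 */
    printf("allocated since last checkpoint: %ld\n",
           pcpu_sum(&alloc_valid_block_count));

    pcpu_set(&alloc_valid_block_count, 0);      /* the checkpoint resets it */
    printf("after checkpoint: %ld\n", pcpu_sum(&alloc_valid_block_count));
    return 0;
}
```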