/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *fsync_entry_slab;

/* check whether the file system has enough space for roll-forward recovery */
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}

/* look up an already collected fsync inode entry by its inode number */
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct list_head *this;
	struct fsync_inode_entry *entry;

	list_for_each(this, head) {
		entry = list_entry(this, struct fsync_inode_entry, list);
		if (entry->inode->i_ino == ino)
			return entry;
	}
	return NULL;
}

/* re-link a recovered inode into its parent directory if it is missing */
static int recover_dentry(struct page *ipage, struct inode *inode)
{
	struct f2fs_node *raw_node = (struct f2fs_node *)kmap(ipage);
	struct f2fs_inode *raw_inode = &(raw_node->i);
	struct dentry dent, parent;
	struct f2fs_dir_entry *de;
	struct page *page;
	struct inode *dir;
	int err = 0;

	if (!is_dent_dnode(ipage))
		goto out;

	dir = f2fs_iget(inode->i_sb, le32_to_cpu(raw_inode->i_pino));
	if (IS_ERR(dir)) {
		err = PTR_ERR(dir);
		goto out;
	}

	parent.d_inode = dir;
	dent.d_parent = &parent;
	dent.d_name.len = le32_to_cpu(raw_inode->i_namelen);
	dent.d_name.name = raw_inode->i_name;

	de = f2fs_find_entry(dir, &dent.d_name, &page);
	if (de) {
		kunmap(page);
		f2fs_put_page(page, 0);
	} else {
		err = f2fs_add_link(&dent, inode);
	}
	iput(dir);
out:
	kunmap(ipage);
	return err;
}

/* restore the inode metadata recorded in the fsynced node page */
static int recover_inode(struct inode *inode, struct page *node_page)
{
	void *kaddr = page_address(node_page);
	struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
	struct f2fs_inode *raw_inode = &(raw_node->i);

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_size_write(inode, le64_to_cpu(raw_inode->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);

	return recover_dentry(node_page, inode);
}

/*
 * Walk the chain of warm node pages written after the last checkpoint and
 * build a list of the inodes that were fsynced and thus need recovery.
 */
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
	struct curseg_info *curseg;
	struct page *page;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff;

	/* read node page; alloc_page() returns NULL on failure, not ERR_PTR */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		if (f2fs_readpage(sbi, page, blkaddr, READ_SYNC))
			goto out;

		/* stop when the chain crosses into an older checkpoint */
		if (cp_ver != cpver_of_node(page))
			goto out;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			entry->blkaddr = blkaddr;
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
							FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					goto out;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
			if (!entry) {
				err = -ENOMEM;
				goto out;
			}

			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				goto out;
			}

			INIT_LIST_HEAD(&entry->list);
			list_add_tail(&entry->list, head);
			entry->blkaddr = blkaddr;
		}
		if (IS_INODE(page)) {
			err = recover_inode(entry->inode, page);
			if (err)
				goto out;
		}
next:
		/* follow the chain to the next node page */
		blkaddr = next_blkaddr_of_node(page);
		ClearPageUptodate(page);
	}
out:
	unlock_page(page);
	__free_pages(page, 0);
	return err;
}

static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi,
					struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	/* entries are freed while walking, so the _safe iterator is needed */
	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}
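
/*
 * A data block being recovered may still be referenced by a node that was
 * valid at the last checkpoint.  Look up the old owner through the segment
 * summary and punch a one-block hole in that inode, so the block address
 * is not referenced twice after recovery.
 */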
static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
					block_t blkaddr)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) &
					(sbi->blocks_per_seg - 1);
	struct f2fs_summary sum;
	nid_t ino;
	void *kaddr;
	struct inode *inode;
	struct page *node_page;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return;

	/* get the previous summary, either from an active log or from disk */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			break;
		}
	}
	if (i > CURSEG_COLD_DATA) {
		struct page *sum_page = get_sum_page(sbi, segno);
		struct f2fs_summary_block *sum_node;
		kaddr = page_address(sum_page);
		sum_node = (struct f2fs_summary_block *)kaddr;
		sum = sum_node->entries[blkoff];
		f2fs_put_page(sum_page, 1);
	}

	/* get the node page that owns the block */
	node_page = get_node_page(sbi, le32_to_cpu(sum.nid));
	bidx = start_bidx_of_node(ofs_of_node(node_page)) +
				le16_to_cpu(sum.ofs_in_node);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	/* deallocate the previous index in the node page */
	inode = f2fs_iget_nowait(sbi->sb, ino);
	if (IS_ERR(inode))
		return;

	truncate_hole(inode, bidx, bidx + 1);
	iput(inode);
}
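
/*
 * Replay one fsynced node page: for each data block index it covers, when
 * the logged block address differs from the one in the current node page,
 * reserve a block if needed, release the stale owner via
 * check_index_in_prev_nodes(), and rewrite the data and node blocks at
 * their logged locations.
 */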
static void do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	unsigned int start, end;
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;

	start = start_bidx_of_node(ofs_of_node(page));
	if (IS_INODE(page))
		end = start + ADDRS_PER_INODE;
	else
		end = start + ADDRS_PER_BLOCK;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&dn, start, 0))
		return;

	wait_on_page_writeback(dn.node_page);

	get_node_info(sbi, dn.nid, &ni);
	BUG_ON(ni.ino != ino_of_node(page));
	BUG_ON(ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
			if (src == NULL_ADDR) {
				int err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				BUG_ON(err);
			}

			/* Check the previous node page having this index */
			check_index_in_prev_nodes(sbi, dest);

			set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

			/* write dummy data page */
			recover_data_page(sbi, NULL, &sum, src, dest);
			update_extent_cache(dest, &dn);
		}
		dn.ofs_in_node++;
	}

	/* write node page in place */
	set_summary(&sum, dn.nid, 0, 0);
	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);

	recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
	f2fs_put_dnode(&dn);
}

/*
 * Walk the node chain once more and replay the data blocks of every inode
 * collected by find_fsync_dnodes().
 */
static void recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
	struct curseg_info *curseg;
	struct page *page;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* read node page; alloc_page() returns NULL on failure, not ERR_PTR */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)
		return;
	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		if (f2fs_readpage(sbi, page, blkaddr, READ_SYNC))
			goto out;

		if (cp_ver != cpver_of_node(page))
			goto out;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;

		do_recover_data(sbi, entry->inode, page, blkaddr);

		/* the last dnode of this inode has been replayed */
		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* follow the chain to the next node page */
		blkaddr = next_blkaddr_of_node(page);
		ClearPageUptodate(page);
	}
out:
	unlock_page(page);
	__free_pages(page, 0);

	allocate_new_segments(sbi);
}

/*
 * Entry point of roll-forward recovery: collect the fsynced inodes, replay
 * their data, and write a checkpoint to make the result durable.
 */
void recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct list_head inode_list;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry), NULL);
	if (unlikely(!fsync_entry_slab))
		return;

	INIT_LIST_HEAD(&inode_list);

	/* step #1: find fsynced inode numbers */
	if (find_fsync_dnodes(sbi, &inode_list))
		goto out;

	if (list_empty(&inode_list))
		goto out;

	/* step #2: recover data */
	sbi->por_doing = 1;
	recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	sbi->por_doing = 0;
	BUG_ON(!list_empty(&inode_list));
out:
	destroy_fsync_dnodes(sbi, &inode_list);
	kmem_cache_destroy(fsync_entry_slab);
	write_checkpoint(sbi, false, false);
}