/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *fsync_entry_slab;

bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}

static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

static int recover_dentry(struct page *ipage, struct inode *inode)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	int err = 0;

	dir = f2fs_iget(inode->i_sb, pino);
	if (IS_ERR(dir)) {
		err = PTR_ERR(dir);
		goto out;
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out_err;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;
	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_SB(inode->i_sb));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode);
	if (err)
		goto out_err;

	if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
		iput(dir);
	} else {
		add_dirty_dir_inode(dir);
		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
	}

	goto out;

out_unmap_put:
	kunmap(page);
	f2fs_put_page(page, 0);
out_err:
	iput(dir);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
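
/*
 * Roll forward the inode metadata (mode, size and timestamps) recorded in
 * the fsynced node page, and replay its dentry if the node carries one.
 */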
static int recover_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(node_page);

	if (!IS_INODE(node_page))
		return 0;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_size_write(inode, le64_to_cpu(raw_inode->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);

	if (is_dent_dnode(node_page))
		return recover_dentry(node_page, inode);

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(node_page), raw_inode->i_name);
	return 0;
}

static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* read node page */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC);
		if (err)
			return err;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
							FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
			if (!entry) {
				err = -ENOMEM;
				break;
			}

			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		err = recover_inode(entry->inode, page);
		if (err && err != -ENOENT)
			break;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}

	unlock_page(page);
	__free_pages(page, 0);

	return err;
}

static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}
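
/*
 * A block still marked valid at @blkaddr is referenced by some previously
 * written node.  Look up its owner through the segment summary and drop
 * that stale index before the block is rewritten by recovery.
 */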
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		struct dnode_of_data tdn = *dn;
		tdn.nid = nid;
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	} else if (dn->nid == nid) {
		struct dnode_of_data tdn = *dn;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	/* Deallocate previous index in the node page */
	inode = f2fs_iget(sbi->sb, ino);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
			le16_to_cpu(sum.ofs_in_node);

	truncate_hole(inode, bidx, bidx + 1);
	iput(inode);
	return 0;
}
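
/*
 * Replay the data block addresses recorded in one fsynced node page and
 * rewrite the corresponding node page in place.
 */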
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int start, end;
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	int err = 0, recovered = 0;

	recover_inline_xattr(inode, page);

	if (recover_inline_data(inode, page))
		goto out;

	if (recover_xattr_data(inode, page, blkaddr))
		goto out;

	start = start_bidx_of_node(ofs_of_node(page), fi);
	end = start + ADDRS_PER_PAGE(page, fi);

	f2fs_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(ni.ino != ino_of_node(page));
	f2fs_bug_on(ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(err);
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

			/* write dummy data page */
			recover_data_page(sbi, NULL, &sum, src, dest);
			update_extent_cache(dest, &dn);
			recovered++;
		}
		dn.ofs_in_node++;
	}

	/* write node page in place */
	set_summary(&sum, dn.nid, 0, 0);
	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);

	recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
err:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}

static int recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* read node page */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)
		return -ENOMEM;

	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC);
		if (err)
			return err;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			break;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;

		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err)
			break;

		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}

	unlock_page(page);
	__free_pages(page, 0);

	if (!err)
		allocate_new_segments(sbi);
	return err;
}

int recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct list_head inode_list;
	block_t blkaddr;
	int err;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);

	/* step #1: find fsynced inode numbers */
	sbi->por_doing = true;

	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	err = find_fsync_dnodes(sbi, &inode_list);
	if (err)
		goto out;

	if (list_empty(&inode_list))
		goto out;

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	f2fs_bug_on(!list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);
	kmem_cache_destroy(fsync_entry_slab);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	sbi->por_doing = false;
	if (err) {
		discard_next_dnode(sbi, blkaddr);

		/* Flush all the NAT/SIT pages */
		while (get_pages(sbi, F2FS_DIRTY_META))
			sync_meta_pages(sbi, META, LONG_MAX);
	} else if (need_writecp) {
		write_checkpoint(sbi, false);
	}
	return err;
}