// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}

static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}

static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
	if (drop) {
		/* inode should not be recovered, drop it */
		f2fs_inode_synced(entry->inode);
	}
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}
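
/*
 * Re-link a recovered inode into its parent directory.  The parent is
 * found via i_pino from the raw inode (and cached in @dir_list for later
 * cleanup); a stale dentry of the same name that still points at a
 * different inode is unlinked through the orphan path before the entry
 * is re-added.
 */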
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct fscrypt_name fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;

	memset(&fname, 0, sizeof(struct fscrypt_name));
	fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname.disk_name.name = raw_inode->i_name;

	if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_put;
		}

		err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = f2fs_add_dentry(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_put:
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

static int recover_quota_data(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	struct iattr attr;
	uid_t i_uid = le32_to_cpu(raw->i_uid);
	gid_t i_gid = le32_to_cpu(raw->i_gid);
	int err;

	memset(&attr, 0, sizeof(attr));

	attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
	attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);

	if (!uid_eq(attr.ia_uid, inode->i_uid))
		attr.ia_valid |= ATTR_UID;
	if (!gid_eq(attr.ia_gid, inode->i_gid))
		attr.ia_valid |= ATTR_GID;

	if (!attr.ia_valid)
		return 0;

	err = dquot_transfer(inode, &attr);
	if (err)
		set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
	return err;
}

static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
}

static int recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;
	int err;

	inode->i_mode = le16_to_cpu(raw->i_mode);

	err = recover_quota_data(inode, page);
	if (err)
		return err;

	i_uid_write(inode, le32_to_cpu(raw->i_uid));
	i_gid_write(inode, le32_to_cpu(raw->i_gid));

	if (raw->i_inline & F2FS_EXTRA_ATTR) {
		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
								i_projid)) {
			projid_t i_projid;

			i_projid = (projid_t)le32_to_cpu(raw->i_projid);
			F2FS_I(inode)->i_projid =
				make_kprojid(&init_user_ns, i_projid);
		}
	}

	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;
	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
	f2fs_set_inode_flags(inode);
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
				le16_to_cpu(raw->i_gc_failures);

	recover_inline_flags(inode, raw);

	f2fs_mark_inode_dirty_sync(inode, true);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE,
		"recover_inode: ino = %x, name = %s, inline = %x",
			ino_of_node(page), name, raw->i_inline);
	return 0;
}
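
/*
 * Pass #1 of roll-forward recovery: walk the warm node chain starting at
 * the next free block of CURSEG_WARM_NODE and collect, into @head, every
 * inode that owns a dnode carrying a fsync mark.  The walk ends at the
 * first block that is not recoverable against the current checkpoint,
 * and bails out with -EINVAL if the chain appears to loop (more blocks
 * visited than could have been written since the last checkpoint).
 */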
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	unsigned int loop_cnt = 0;
	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
						valid_user_blocks(sbi);
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = f2fs_recover_inode_page(sbi, page);
				if (err)
					break;
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* sanity check in order to detect looped node chain */
		if (++loop_cnt >= free_blocks ||
				blkaddr == next_blkaddr_of_node(page)) {
			f2fs_msg(sbi->sb, KERN_NOTICE,
				"%s: detect looped node chain, blkaddr:%u, next:%u",
				__func__, blkaddr, next_blkaddr_of_node(page));
			err = -EINVAL;
			break;
		}

		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		f2fs_ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}

static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry, drop);
}
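
/*
 * @blkaddr is still marked valid in the SIT, so some pre-checkpoint
 * (inode, index) pair owns it.  Look that owner up, through the in-core
 * curseg summary if the block sits in an active data log, otherwise
 * through the on-disk summary block, and truncate the stale index so
 * the block can be reused for the data being recovered.
 */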
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = f2fs_get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return PTR_ERR(sum_page);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = f2fs_start_bidx_of_node(offset, inode) +
				le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.inode, tdn.node_page,
					tdn.ofs_in_node) == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}
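
/*
 * Replay a single fsynced node page on top of the checkpointed image:
 * recover xattr and inline data first, then walk every data index in
 * the page and make the on-disk dnode match it (truncating, reserving,
 * or replacing blocks as needed), and finally stamp the node footer so
 * the dnode itself reflects the recovered state.
 */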
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		f2fs_recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = f2fs_recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	if (f2fs_recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	err = f2fs_get_node_info(sbi, dn.nid, &ni);
	if (err)
		goto err;

	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			f2fs_reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = f2fs_reserve_new_block(&dn);
				while (err &&
				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
					err = f2fs_reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		inode->i_ino,
		file_keep_isize(inode) ? "keep" : "recover",
		recovered, err);
	return err;
}
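
/*
 * Pass #2: walk the same node chain as find_fsync_dnodes(), and for each
 * inode collected there replay the inode block, the last dentry block
 * and the data indices.  Entries whose final fsynced dnode has been
 * replayed move to @tmp_inode_list, which lets the caller verify that
 * every collected inode was fully recovered.
 */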
658 */ 659 if (IS_INODE(page)) { 660 err = recover_inode(entry->inode, page); 661 if (err) 662 break; 663 } 664 if (entry->last_dentry == blkaddr) { 665 err = recover_dentry(entry->inode, page, dir_list); 666 if (err) { 667 f2fs_put_page(page, 1); 668 break; 669 } 670 } 671 err = do_recover_data(sbi, entry->inode, page); 672 if (err) { 673 f2fs_put_page(page, 1); 674 break; 675 } 676 677 if (entry->blkaddr == blkaddr) 678 list_move_tail(&entry->list, tmp_inode_list); 679 next: 680 /* check next segment */ 681 blkaddr = next_blkaddr_of_node(page); 682 f2fs_put_page(page, 1); 683 } 684 if (!err) 685 f2fs_allocate_new_segments(sbi); 686 return err; 687 } 688 689 int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only) 690 { 691 struct list_head inode_list, tmp_inode_list; 692 struct list_head dir_list; 693 int err; 694 int ret = 0; 695 unsigned long s_flags = sbi->sb->s_flags; 696 bool need_writecp = false; 697 #ifdef CONFIG_QUOTA 698 int quota_enabled; 699 #endif 700 701 if (s_flags & SB_RDONLY) { 702 f2fs_msg(sbi->sb, KERN_INFO, 703 "recover fsync data on readonly fs"); 704 sbi->sb->s_flags &= ~SB_RDONLY; 705 } 706 707 #ifdef CONFIG_QUOTA 708 /* Needed for iput() to work correctly and not trash data */ 709 sbi->sb->s_flags |= SB_ACTIVE; 710 /* Turn on quotas so that they are updated correctly */ 711 quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY); 712 #endif 713 714 fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry", 715 sizeof(struct fsync_inode_entry)); 716 if (!fsync_entry_slab) { 717 err = -ENOMEM; 718 goto out; 719 } 720 721 INIT_LIST_HEAD(&inode_list); 722 INIT_LIST_HEAD(&tmp_inode_list); 723 INIT_LIST_HEAD(&dir_list); 724 725 /* prevent checkpoint */ 726 mutex_lock(&sbi->cp_mutex); 727 728 /* step #1: find fsynced inode numbers */ 729 err = find_fsync_dnodes(sbi, &inode_list, check_only); 730 if (err || list_empty(&inode_list)) 731 goto skip; 732 733 if (check_only) { 734 ret = 1; 735 goto skip; 736 } 737 738 need_writecp = true; 739 740 /* step #2: recover data */ 741 err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list); 742 if (!err) 743 f2fs_bug_on(sbi, !list_empty(&inode_list)); 744 else { 745 /* restore s_flags to let iput() trash data */ 746 sbi->sb->s_flags = s_flags; 747 } 748 skip: 749 destroy_fsync_dnodes(&inode_list, err); 750 destroy_fsync_dnodes(&tmp_inode_list, err); 751 752 /* truncate meta pages to be used by the recovery */ 753 truncate_inode_pages_range(META_MAPPING(sbi), 754 (loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1); 755 756 if (err) { 757 truncate_inode_pages_final(NODE_MAPPING(sbi)); 758 truncate_inode_pages_final(META_MAPPING(sbi)); 759 } else { 760 clear_sbi_flag(sbi, SBI_POR_DOING); 761 } 762 mutex_unlock(&sbi->cp_mutex); 763 764 /* let's drop all the directory inodes for clean checkpoint */ 765 destroy_fsync_dnodes(&dir_list, err); 766 767 if (need_writecp) { 768 set_sbi_flag(sbi, SBI_IS_RECOVERED); 769 770 if (!err) { 771 struct cp_control cpc = { 772 .reason = CP_RECOVERY, 773 }; 774 err = f2fs_write_checkpoint(sbi, &cpc); 775 } 776 } 777 778 kmem_cache_destroy(fsync_entry_slab); 779 out: 780 #ifdef CONFIG_QUOTA 781 /* Turn quotas off */ 782 if (quota_enabled) 783 f2fs_quota_off_umount(sbi->sb); 784 #endif 785 sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */ 786 787 return ret ? ret: err; 788 } 789