/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *fsync_entry_slab;

bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}

static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

static int recover_dentry(struct page *ipage, struct inode *inode)
{
	void *kaddr = page_address(ipage);
	struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
	struct f2fs_inode *raw_inode = &(raw_node->i);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	int err = 0;

	dir = check_dirty_dir_inode(F2FS_SB(inode->i_sb), pino);
	if (!dir) {
		dir = f2fs_iget(inode->i_sb, pino);
		if (IS_ERR(dir)) {
			err = PTR_ERR(dir);
			goto out;
		}
		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
		add_dirty_dir_inode(dir);
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino)) {
		kunmap(page);
		f2fs_put_page(page, 0);
		goto out;
	}
	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			kunmap(page);
			f2fs_put_page(page, 0);
			goto out;
		}
		f2fs_delete_entry(de, page, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode and its dentry: "
			"ino = %x, name = %s, dir = %lx, err = %d",
			ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
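/*
 * Restore the in-memory inode from the metadata recorded in a fsynced
 * node page (mode, size, timestamps), and replay its dentry when the
 * node page was marked as carrying one.
 */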
static int recover_inode(struct inode *inode, struct page *node_page)
{
	void *kaddr = page_address(node_page);
	struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
	struct f2fs_inode *raw_inode = &(raw_node->i);

	if (!IS_INODE(node_page))
		return 0;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_size_write(inode, le64_to_cpu(raw_inode->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);

	if (is_dent_dnode(node_page))
		return recover_dentry(node_page, inode);

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(node_page), raw_inode->i_name);
	return 0;
}

static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
	struct curseg_info *curseg;
	struct page *page;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff;

	/* read node page */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
		if (err)
			goto out;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
							FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
			if (!entry) {
				err = -ENOMEM;
				break;
			}

			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		err = recover_inode(entry->inode, page);
		if (err && err != -ENOENT)
			break;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}
	unlock_page(page);
out:
	__free_pages(page, 0);
	return err;
}

static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}
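/*
 * If the block address being recovered is still claimed by an older
 * node, look that node up through the segment summary and invalidate
 * its stale index, so the same block is not referenced twice.
 */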
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) &
					(sbi->blocks_per_seg - 1);
	struct f2fs_summary sum;
	nid_t ino, nid;
	void *kaddr;
	struct inode *inode;
	struct page *node_page;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			break;
		}
	}
	if (i > CURSEG_COLD_DATA) {
		struct page *sum_page = get_sum_page(sbi, segno);
		struct f2fs_summary_block *sum_node;
		kaddr = page_address(sum_page);
		sum_node = (struct f2fs_summary_block *)kaddr;
		sum = sum_node->entries[blkoff];
		f2fs_put_page(sum_page, 1);
	}

	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		struct dnode_of_data tdn = *dn;
		tdn.nid = nid;
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	} else if (dn->nid == nid) {
		struct dnode_of_data tdn = *dn;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);
	bidx = start_bidx_of_node(ofs_of_node(node_page)) +
				le16_to_cpu(sum.ofs_in_node);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	/* Deallocate previous index in the node page */
	inode = f2fs_iget(sbi->sb, ino);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	truncate_hole(inode, bidx, bidx + 1);
	iput(inode);
	return 0;
}
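/*
 * Replay the data block addresses recorded in a fsynced node page into
 * the current dnode: reserve new blocks where needed, drop any stale
 * references held by previous nodes, and rewrite the node page in place.
 */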
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	unsigned int start, end;
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	int err = 0, recovered = 0;
	int ilock;

	start = start_bidx_of_node(ofs_of_node(page));
	if (IS_INODE(page))
		end = start + ADDRS_PER_INODE;
	else
		end = start + ADDRS_PER_BLOCK;

	ilock = mutex_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		mutex_unlock_op(sbi, ilock);
		return err;
	}

	wait_on_page_writeback(dn.node_page);

	get_node_info(sbi, dn.nid, &ni);
	BUG_ON(ni.ino != ino_of_node(page));
	BUG_ON(ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
			if (src == NULL_ADDR) {
				int err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				BUG_ON(err);
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

			/* write dummy data page */
			recover_data_page(sbi, NULL, &sum, src, dest);
			update_extent_cache(dest, &dn);
			recovered++;
		}
		dn.ofs_in_node++;
	}

	/* write node page in place */
	set_summary(&sum, dn.nid, 0, 0);
	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);

	recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
err:
	f2fs_put_dnode(&dn);
	mutex_unlock_op(sbi, ilock);

	f2fs_msg(sbi->sb, KERN_NOTICE, "recover_data: ino = %lx, "
			"recovered_data = %d blocks, err = %d",
			inode->i_ino, recovered, err);
	return err;
}

static int recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
	struct curseg_info *curseg;
	struct page *page;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* read node page */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
		if (err)
			goto out;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			break;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;

		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err)
			break;

		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}
	unlock_page(page);
out:
	__free_pages(page, 0);

	if (!err)
		allocate_new_segments(sbi);
	return err;
}

int recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct list_head inode_list;
	int err;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry), NULL);
	if (unlikely(!fsync_entry_slab))
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);

	/* step #1: find fsynced inode numbers */
	sbi->por_doing = 1;
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err)
		goto out;

	if (list_empty(&inode_list))
		goto out;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	BUG_ON(!list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);
	kmem_cache_destroy(fsync_entry_slab);
	sbi->por_doing = 0;
	if (!err)
		write_checkpoint(sbi, false);
	return err;
}