recovery.c: diff between commits 393ff91f57c87d48ffed30878be6e3e486d3a00a and 6ead114232f786e3ef7a034c8617f2a4df8e5226
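
This change makes the f2fs roll-forward recovery path report errors instead of swallowing them: do_recover_data(), recover_data(), and recover_fsync_data() change from void to int; the results of recover_inode_page(), get_dnode_of_data(), f2fs_readpage(), find_fsync_dnodes(), and recover_data() are checked and propagated; allocation failures return -ENOMEM; a checkpoint-version mismatch now simply ends the log scan instead of being reported as -EINVAL; and allocate_new_segments() runs only if recovery succeeded.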
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
 /*
  * fs/f2fs/recovery.c
  *
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as

--- 104 unchanged lines hidden ---

 		struct fsync_inode_entry *entry;

 		err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
 		if (err)
 			goto out;

 		lock_page(page);

-		if (cp_ver != cpver_of_node(page)) {
-			err = -EINVAL;
+		if (cp_ver != cpver_of_node(page))
 			goto unlock_out;
-		}

 		if (!is_fsync_dnode(page))
 			goto next;

 		entry = get_fsync_inode(head, ino_of_node(page));
 		if (entry) {
 			entry->blkaddr = blkaddr;
 			if (IS_INODE(page) && is_dent_dnode(page))
 				set_inode_flag(F2FS_I(entry->inode),
 						FI_INC_LINK);
 		} else {
 			if (IS_INODE(page) && is_dent_dnode(page)) {
-				if (recover_inode_page(sbi, page)) {
-					err = -ENOMEM;
+				err = recover_inode_page(sbi, page);
+				if (err)
 					goto unlock_out;
-				}
 			}

 			/* add this fsync inode to the list */
 			entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
 			if (!entry) {
 				err = -ENOMEM;
 				goto unlock_out;
 			}

--- 83 unchanged lines hidden ---

 	inode = f2fs_iget(sbi->sb, ino);
 	if (IS_ERR(inode))
 		return;

 	truncate_hole(inode, bidx, bidx + 1);
 	iput(inode);
 }

-static void do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
+static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
 		struct page *page, block_t blkaddr)
 {
 	unsigned int start, end;
 	struct dnode_of_data dn;
 	struct f2fs_summary sum;
 	struct node_info ni;
+	int err = 0;

 	start = start_bidx_of_node(ofs_of_node(page));
 	if (IS_INODE(page))
 		end = start + ADDRS_PER_INODE;
 	else
 		end = start + ADDRS_PER_BLOCK;

 	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	if (get_dnode_of_data(&dn, start, ALLOC_NODE))
-		return;
+	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
+	if (err)
+		return err;

 	wait_on_page_writeback(dn.node_page);

 	get_node_info(sbi, dn.nid, &ni);
 	BUG_ON(ni.ino != ino_of_node(page));
 	BUG_ON(ofs_of_node(dn.node_page) != ofs_of_node(page));

 	for (; start < end; start++) {

--- 28 unchanged lines hidden ---

 	copy_node_footer(dn.node_page, page);
 	fill_node_footer(dn.node_page, dn.nid, ni.ino,
 					ofs_of_node(page), false);
 	set_page_dirty(dn.node_page);

 	recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
 	f2fs_put_dnode(&dn);
+	return 0;
 }

-static void recover_data(struct f2fs_sb_info *sbi,
+static int recover_data(struct f2fs_sb_info *sbi,
 		struct list_head *head, int type)
 {
 	unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
 	struct curseg_info *curseg;
 	struct page *page;
+	int err = 0;
 	block_t blkaddr;

 	/* get node pages in the current segment */
 	curseg = CURSEG_I(sbi, type);
 	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

 	/* read node page */
 	page = alloc_page(GFP_NOFS | __GFP_ZERO);
 	if (IS_ERR(page))
-		return;
+		return -ENOMEM;
+
 	lock_page(page);

 	while (1) {
 		struct fsync_inode_entry *entry;

-		if (f2fs_readpage(sbi, page, blkaddr, READ_SYNC))
+		err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
+		if (err)
 			goto out;

 		lock_page(page);

 		if (cp_ver != cpver_of_node(page))
 			goto unlock_out;

 		entry = get_fsync_inode(head, ino_of_node(page));
 		if (!entry)
 			goto next;

-		do_recover_data(sbi, entry->inode, page, blkaddr);
+		err = do_recover_data(sbi, entry->inode, page, blkaddr);
+		if (err)
+			goto out;

 		if (entry->blkaddr == blkaddr) {
 			iput(entry->inode);
 			list_del(&entry->list);
 			kmem_cache_free(fsync_entry_slab, entry);
 		}
 next:
 		/* check next segment */
 		blkaddr = next_blkaddr_of_node(page);
 	}
 unlock_out:
 	unlock_page(page);
 out:
 	__free_pages(page, 0);

-	allocate_new_segments(sbi);
+	if (!err)
+		allocate_new_segments(sbi);
+	return err;
 }

-void recover_fsync_data(struct f2fs_sb_info *sbi)
+int recover_fsync_data(struct f2fs_sb_info *sbi)
 {
 	struct list_head inode_list;
+	int err;

 	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
 			sizeof(struct fsync_inode_entry), NULL);
 	if (unlikely(!fsync_entry_slab))
-		return;
+		return -ENOMEM;

 	INIT_LIST_HEAD(&inode_list);

 	/* step #1: find fsynced inode numbers */
-	if (find_fsync_dnodes(sbi, &inode_list))
+	err = find_fsync_dnodes(sbi, &inode_list);
+	if (err)
 		goto out;

 	if (list_empty(&inode_list))
 		goto out;

 	/* step #2: recover data */
 	sbi->por_doing = 1;
-	recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
+	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
 	sbi->por_doing = 0;
 	BUG_ON(!list_empty(&inode_list));
 out:
 	destroy_fsync_dnodes(sbi, &inode_list);
 	kmem_cache_destroy(fsync_entry_slab);
 	write_checkpoint(sbi, false);
+	return err;
 }
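
With recover_fsync_data() returning an int, the mount path can now abort when roll-forward recovery fails instead of continuing with partially recovered data. A minimal sketch of such a call site follows; it assumes the usual f2fs_fill_super() shape, and the err variable, DISABLE_ROLL_FORWARD mount option, and free_root_inode unwind label are illustrative rather than quoted from this commit:

	/* sketch: propagate a roll-forward recovery failure out of mount */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		err = recover_fsync_data(sbi);	/* 0 on success, -errno on failure */
		if (err)
			goto free_root_inode;	/* unwind and fail the mount */
	}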