--- a/fs/f2fs/checkpoint.c (b10778a00d40b3d9fdaaf5891e802794781ff71c)
+++ b/fs/f2fs/checkpoint.c (67298804f34452a53a9ec9e609d95aa35084132b)

 /*
  * fs/f2fs/checkpoint.c
  *
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as

--- 58 unchanged lines hidden ---

        if (unlikely(page->mapping != mapping)) {
                f2fs_put_page(page, 1);
                goto repeat;
        }
 out:
        return page;
 }

-static inline int get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
+struct page *get_meta_page_ra(struct f2fs_sb_info *sbi, pgoff_t index)
 {
+       bool readahead = false;
+       struct page *page;
+
+       page = find_get_page(META_MAPPING(sbi), index);
+       if (!page || (page && !PageUptodate(page)))
+               readahead = true;
+       f2fs_put_page(page, 0);
+
+       if (readahead)
+               ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR);
+       return get_meta_page(sbi, index);
+}
+
+static inline block_t get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
+{
        switch (type) {
        case META_NAT:
                return NM_I(sbi)->max_nid / NAT_ENTRY_PER_BLOCK;
        case META_SIT:
                return SIT_BLK_CNT(sbi);
        case META_SSA:
        case META_CP:
                return 0;
+       case META_POR:
+               return MAX_BLKADDR(sbi);
        default:
                BUG();
        }
 }

 /*
  * Readahead CP/NAT/SIT/SSA pages
  */
-int ra_meta_pages(struct f2fs_sb_info *sbi, int start, int nrpages, int type)
+int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type)
 {
        block_t prev_blk_addr = 0;
        struct page *page;
-       int blkno = start;
-       int max_blks = get_max_meta_blks(sbi, type);
+       block_t blkno = start;
+       block_t max_blks = get_max_meta_blks(sbi, type);

        struct f2fs_io_info fio = {
                .type = META,
                .rw = READ_SYNC | REQ_META | REQ_PRIO
        };

        for (; nrpages-- > 0; blkno++) {
                block_t blk_addr;

--- 13 unchanged lines hidden ---

                        blk_addr = current_sit_addr(sbi,
                                        blkno * SIT_ENTRY_PER_BLOCK);
                        if (blkno != start && prev_blk_addr + 1 != blk_addr)
                                goto out;
                        prev_blk_addr = blk_addr;
                        break;
                case META_SSA:
                case META_CP:
-                       /* get ssa/cp block addr */
+               case META_POR:
+                       if (unlikely(blkno >= max_blks))
+                               goto out;
+                       if (unlikely(blkno < SEG0_BLKADDR(sbi)))
+                               goto out;
                        blk_addr = blkno;
                        break;
                default:
                        BUG();
                }

                page = grab_cache_page(META_MAPPING(sbi), blk_addr);
                if (!page)

--- 9 unchanged lines hidden ---

 out:
        f2fs_submit_merged_bio(sbi, META, READ);
        return blkno - start;
 }
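Note: the new META_POR type extends the readahead machinery above to power-off-recovery (POR) blocks, and get_meta_page_ra() is its consumer: on a cache miss it prefetches the next MAX_BIO_BLOCKS(sbi) blocks before the synchronous read. A minimal caller sketch, assuming only the helpers shown in this diff (the recovery-side function name below is hypothetical):

/*
 * Hypothetical caller: during roll-forward recovery, fetch a warm node
 * block through the readahead-aware helper. get_meta_page_ra() issues
 * ra_meta_pages(..., META_POR) when the page is absent or stale, then
 * falls back to the synchronous get_meta_page().
 */
static struct page *fetch_recovery_block(struct f2fs_sb_info *sbi,
						block_t blkaddr)
{
	return get_meta_page_ra(sbi, blkaddr);
}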

 static int f2fs_write_meta_page(struct page *page,
                                struct writeback_control *wbc)
 {
-       struct inode *inode = page->mapping->host;
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_P_SB(page);

        trace_f2fs_writepage(page, META);

        if (unlikely(sbi->por_doing))
                goto redirty_out;
        if (wbc->for_reclaim)
                goto redirty_out;
        if (unlikely(f2fs_cp_error(sbi)))

--- 8 unchanged lines hidden ---

 redirty_out:
        redirty_page_for_writepage(wbc, page);
        return AOP_WRITEPAGE_ACTIVATE;
 }
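Note: the F2FS_P_SB()/F2FS_M_SB() accessors used in this and the next hunk (and F2FS_I_SB() further down) are introduced in f2fs.h, outside this diff. A sketch of the accessor chain as it presumably reads there:

/* Sketch of the f2fs.h accessor chain assumed by this diff. */
static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
{
	return F2FS_SB(inode->i_sb);
}

static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
{
	return F2FS_I_SB(mapping->host);
}

static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
{
	return F2FS_M_SB(page->mapping);
}

Each level peels one layer of indirection, so every call site can pass whatever object it already holds instead of open-coding page->mapping->host->i_sb.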

 static int f2fs_write_meta_pages(struct address_space *mapping,
                                struct writeback_control *wbc)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
        long diff, written;

        trace_f2fs_writepages(mapping->host, wbc, META);

        /* collect a number of dirty meta pages and write together */
        if (wbc->for_kupdate ||
                get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
                goto skip_write;

--- 65 unchanged lines hidden ---

        if (nwritten)
                f2fs_submit_merged_bio(sbi, type, WRITE);

        return nwritten;
 }

 static int f2fs_set_meta_page_dirty(struct page *page)
 {
-       struct address_space *mapping = page->mapping;
-       struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
-
        trace_f2fs_set_page_dirty(page, META);

        SetPageUptodate(page);
        if (!PageDirty(page)) {
                __set_page_dirty_nobuffers(page);
-               inc_page_count(sbi, F2FS_DIRTY_META);
+               inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
                return 1;
        }
        return 0;
 }

 const struct address_space_operations f2fs_meta_aops = {
        .writepage      = f2fs_write_meta_page,
        .writepages     = f2fs_write_meta_pages,
        .set_page_dirty = f2fs_set_meta_page_dirty,
 };

 static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
 {
+       struct inode_management *im = &sbi->im[type];
        struct ino_entry *e;
 retry:
-       spin_lock(&sbi->ino_lock[type]);
+       spin_lock(&im->ino_lock);

-       e = radix_tree_lookup(&sbi->ino_root[type], ino);
+       e = radix_tree_lookup(&im->ino_root, ino);
        if (!e) {
                e = kmem_cache_alloc(ino_entry_slab, GFP_ATOMIC);
                if (!e) {
-                       spin_unlock(&sbi->ino_lock[type]);
+                       spin_unlock(&im->ino_lock);
                        goto retry;
                }
-               if (radix_tree_insert(&sbi->ino_root[type], ino, e)) {
-                       spin_unlock(&sbi->ino_lock[type]);
+               if (radix_tree_insert(&im->ino_root, ino, e)) {
+                       spin_unlock(&im->ino_lock);
                        kmem_cache_free(ino_entry_slab, e);
                        goto retry;
                }
                memset(e, 0, sizeof(struct ino_entry));
                e->ino = ino;

-               list_add_tail(&e->list, &sbi->ino_list[type]);
+               list_add_tail(&e->list, &im->ino_list);
+               if (type != ORPHAN_INO)
+                       im->ino_num++;
        }
-       spin_unlock(&sbi->ino_lock[type]);
+       spin_unlock(&im->ino_lock);
 }

 static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
 {
+       struct inode_management *im = &sbi->im[type];
        struct ino_entry *e;

-       spin_lock(&sbi->ino_lock[type]);
-       e = radix_tree_lookup(&sbi->ino_root[type], ino);
+       spin_lock(&im->ino_lock);
+       e = radix_tree_lookup(&im->ino_root, ino);
        if (e) {
                list_del(&e->list);
-               radix_tree_delete(&sbi->ino_root[type], ino);
-               if (type == ORPHAN_INO)
-                       sbi->n_orphans--;
-               spin_unlock(&sbi->ino_lock[type]);
+               radix_tree_delete(&im->ino_root, ino);
+               im->ino_num--;
+               spin_unlock(&im->ino_lock);
                kmem_cache_free(ino_entry_slab, e);
                return;
        }
-       spin_unlock(&sbi->ino_lock[type]);
+       spin_unlock(&im->ino_lock);
 }
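Note: struct inode_management, referenced throughout this diff, is defined in f2fs.h; it folds the former ino_root[]/ino_lock[]/ino_list[] parallel arrays of f2fs_sb_info, plus the old n_orphans counter, into one per-type object. A sketch, assuming the f2fs.h side of this change:

/* Sketch of the per-type wrapper and its index types assumed by this diff. */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	MAX_INO_ENTRY,		/* max. list */
};

struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry radix tree */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

With the counter inside the struct, ino_num now also tracks the append/update lists, which is why __add_ino_entry() increments it for every type except ORPHAN_INO: orphan slots are reserved up front in acquire_orphan_inode() below.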

 void add_dirty_inode(struct f2fs_sb_info *sbi, nid_t ino, int type)
 {
        /* add new dirty ino entry into list */
        __add_ino_entry(sbi, ino, type);
 }

 void remove_dirty_inode(struct f2fs_sb_info *sbi, nid_t ino, int type)
 {
        /* remove dirty ino entry from list */
        __remove_ino_entry(sbi, ino, type);
 }

 /* mode should be APPEND_INO or UPDATE_INO */
 bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
 {
+       struct inode_management *im = &sbi->im[mode];
        struct ino_entry *e;
-       spin_lock(&sbi->ino_lock[mode]);
-       e = radix_tree_lookup(&sbi->ino_root[mode], ino);
-       spin_unlock(&sbi->ino_lock[mode]);
+
+       spin_lock(&im->ino_lock);
+       e = radix_tree_lookup(&im->ino_root, ino);
+       spin_unlock(&im->ino_lock);
        return e ? true : false;
 }

 void release_dirty_inode(struct f2fs_sb_info *sbi)
 {
        struct ino_entry *e, *tmp;
        int i;

        for (i = APPEND_INO; i <= UPDATE_INO; i++) {
-               spin_lock(&sbi->ino_lock[i]);
-               list_for_each_entry_safe(e, tmp, &sbi->ino_list[i], list) {
+               struct inode_management *im = &sbi->im[i];
+
+               spin_lock(&im->ino_lock);
+               list_for_each_entry_safe(e, tmp, &im->ino_list, list) {
                        list_del(&e->list);
-                       radix_tree_delete(&sbi->ino_root[i], e->ino);
+                       radix_tree_delete(&im->ino_root, e->ino);
                        kmem_cache_free(ino_entry_slab, e);
+                       im->ino_num--;
                }
-               spin_unlock(&sbi->ino_lock[i]);
+               spin_unlock(&im->ino_lock);
        }
 }

 int acquire_orphan_inode(struct f2fs_sb_info *sbi)
 {
+       struct inode_management *im = &sbi->im[ORPHAN_INO];
        int err = 0;

-       spin_lock(&sbi->ino_lock[ORPHAN_INO]);
-       if (unlikely(sbi->n_orphans >= sbi->max_orphans))
+       spin_lock(&im->ino_lock);
+       if (unlikely(im->ino_num >= sbi->max_orphans))
                err = -ENOSPC;
        else
-               sbi->n_orphans++;
-       spin_unlock(&sbi->ino_lock[ORPHAN_INO]);
+               im->ino_num++;
+       spin_unlock(&im->ino_lock);

        return err;
 }

 void release_orphan_inode(struct f2fs_sb_info *sbi)
 {
-       spin_lock(&sbi->ino_lock[ORPHAN_INO]);
-       f2fs_bug_on(sbi->n_orphans == 0);
-       sbi->n_orphans--;
-       spin_unlock(&sbi->ino_lock[ORPHAN_INO]);
+       struct inode_management *im = &sbi->im[ORPHAN_INO];
+
+       spin_lock(&im->ino_lock);
+       f2fs_bug_on(sbi, im->ino_num == 0);
+       im->ino_num--;
+       spin_unlock(&im->ino_lock);
 }

 void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
 {
        /* add new orphan ino entry into list */
        __add_ino_entry(sbi, ino, ORPHAN_INO);
 }

 void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
 {
        /* remove orphan entry from orphan list */
        __remove_ino_entry(sbi, ino, ORPHAN_INO);
 }

 static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
 {
        struct inode *inode = f2fs_iget(sbi->sb, ino);
-       f2fs_bug_on(IS_ERR(inode));
+       f2fs_bug_on(sbi, IS_ERR(inode));
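Note: f2fs_bug_on() gains the sbi argument throughout this diff so that non-debug builds can degrade a failed assertion to a warning plus a request for fsck, instead of a hard BUG_ON(). A sketch, assuming the f2fs.h definition this diff builds against:

/* Sketch of the assumed f2fs.h definition. */
#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)				\
	do {							\
		if (unlikely(condition)) {			\
			WARN_ON(1);				\
			sbi->need_fsck = true;			\
		}						\
	} while (0)
#endif

The need_fsck flag set here is what the do_checkpoint() hunk further down persists via CP_FSCK_FLAG.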
        clear_nlink(inode);

        /* truncate all the data during iput */
        iput(inode);
 }

 void recover_orphan_inodes(struct f2fs_sb_info *sbi)
 {

--- 28 unchanged lines hidden ---

 }

 static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
 {
        struct list_head *head;
        struct f2fs_orphan_block *orphan_blk = NULL;
        unsigned int nentries = 0;
        unsigned short index;
-       unsigned short orphan_blocks =
-                       (unsigned short)GET_ORPHAN_BLOCKS(sbi->n_orphans);
+       unsigned short orphan_blocks;
        struct page *page = NULL;
        struct ino_entry *orphan = NULL;
+       struct inode_management *im = &sbi->im[ORPHAN_INO];

+       orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);
+
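Note: dropping the (unsigned short) cast is safe because GET_ORPHAN_BLOCKS() already yields a small block count. Presumably, in f2fs.h, it is a plain round-up division over the per-block orphan capacity; a sketch (the exact macro body is an assumption):

/* Sketch of the assumed f2fs.h macro. */
#define F2FS_ORPHANS_PER_BLOCK	1020
#define GET_ORPHAN_BLOCKS(n)	((n + F2FS_ORPHANS_PER_BLOCK - 1) / \
					F2FS_ORPHANS_PER_BLOCK)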
        for (index = 0; index < orphan_blocks; index++)
                grab_meta_page(sbi, start_blk + index);

        index = 1;
-       spin_lock(&sbi->ino_lock[ORPHAN_INO]);
-       head = &sbi->ino_list[ORPHAN_INO];
+       spin_lock(&im->ino_lock);
+       head = &im->ino_list;

        /* loop for each orphan inode entry and write them in Jornal block */
        list_for_each_entry(orphan, head, list) {
                if (!page) {
                        page = find_get_page(META_MAPPING(sbi), start_blk++);
-                       f2fs_bug_on(!page);
+                       f2fs_bug_on(sbi, !page);
                        orphan_blk =
                                (struct f2fs_orphan_block *)page_address(page);
                        memset(orphan_blk, 0, sizeof(*orphan_blk));
                        f2fs_put_page(page, 0);
                }

                orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);

--- 17 unchanged lines hidden ---

        if (page) {
                orphan_blk->blk_addr = cpu_to_le16(index);
                orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
                orphan_blk->entry_count = cpu_to_le32(nentries);
                set_page_dirty(page);
                f2fs_put_page(page, 1);
        }

-       spin_unlock(&sbi->ino_lock[ORPHAN_INO]);
+       spin_unlock(&im->ino_lock);
 }

 static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
                                block_t cp_addr, unsigned long long *version)
 {
        struct page *cp_page_1, *cp_page_2 = NULL;
        unsigned long blk_size = sbi->blocksize;
        struct f2fs_checkpoint *cp_block;

--- 109 unchanged lines hidden ---

 fail_no_cp:
        kfree(sbi->ckpt);
        return -EINVAL;
 }

 static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

        if (is_inode_flag_set(F2FS_I(inode), FI_DIRTY_DIR))
                return -EEXIST;

        set_inode_flag(F2FS_I(inode), FI_DIRTY_DIR);
        F2FS_I(inode)->dirty_dir = new;
        list_add_tail(&new->list, &sbi->dir_inode_list);
        stat_inc_dirty_dir(sbi);
        return 0;
 }

-void set_dirty_dir_page(struct inode *inode, struct page *page)
+void update_dirty_page(struct inode *inode, struct page *page)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dir_inode_entry *new;
        int ret = 0;

-       if (!S_ISDIR(inode->i_mode))
+       if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode))
                return;

+       if (!S_ISDIR(inode->i_mode)) {
+               inode_inc_dirty_pages(inode);
+               goto out;
+       }
+
        new = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        new->inode = inode;
        INIT_LIST_HEAD(&new->list);

        spin_lock(&sbi->dir_inode_lock);
        ret = __add_dirty_inode(inode, new);
-       inode_inc_dirty_dents(inode);
-       SetPagePrivate(page);
+       inode_inc_dirty_pages(inode);
        spin_unlock(&sbi->dir_inode_lock);

        if (ret)
                kmem_cache_free(inode_entry_slab, new);
+out:
+       SetPagePrivate(page);
 }

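Note: inode_inc_dirty_pages() generalizes the old inode_inc_dirty_dents() so the same per-inode counter covers regular-file data pages, which is what lets update_dirty_page() take the early "out:" path for non-directories. A sketch of the accounting helper, assuming the f2fs.h side of the rename:

/* Sketch of the assumed f2fs.h helper. */
static inline void inode_inc_dirty_pages(struct inode *inode)
{
	atomic_inc(&F2FS_I(inode)->dirty_pages);
	inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
}

For regular files the page is only counted and tagged PagePrivate; the dir_inode_list bookkeeping stays directory-only.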
 void add_dirty_dir_inode(struct inode *inode)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dir_inode_entry *new =
                        f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        int ret = 0;

        new->inode = inode;
        INIT_LIST_HEAD(&new->list);

        spin_lock(&sbi->dir_inode_lock);
        ret = __add_dirty_inode(inode, new);
        spin_unlock(&sbi->dir_inode_lock);

        if (ret)
                kmem_cache_free(inode_entry_slab, new);
 }

 void remove_dirty_dir_inode(struct inode *inode)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dir_inode_entry *entry;

        if (!S_ISDIR(inode->i_mode))
                return;

        spin_lock(&sbi->dir_inode_lock);
-       if (get_dirty_dents(inode) ||
+       if (get_dirty_pages(inode) ||
                        !is_inode_flag_set(F2FS_I(inode), FI_DIRTY_DIR)) {
                spin_unlock(&sbi->dir_inode_lock);
                return;
        }

        entry = F2FS_I(inode)->dirty_dir;
        list_del(&entry->list);
        F2FS_I(inode)->dirty_dir = NULL;

--- 10 unchanged lines hidden ---

 }

 void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
 {
        struct list_head *head;
        struct dir_inode_entry *entry;
        struct inode *inode;
 retry:
+       if (unlikely(f2fs_cp_error(sbi)))
+               return;
+
        spin_lock(&sbi->dir_inode_lock);

        head = &sbi->dir_inode_list;
        if (list_empty(head)) {
                spin_unlock(&sbi->dir_inode_lock);
                return;
        }
        entry = list_entry(head->next, struct dir_inode_entry, list);

--- 78 unchanged lines hidden ---

                if (!get_pages(sbi, F2FS_WRITEBACK))
                        break;

                io_schedule();
        }
        finish_wait(&sbi->cp_wait, &wait);
 }

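Note: struct cp_control, which replaces the bool is_umount parameter in the hunks below, is defined in f2fs.h. A sketch of the structure and its reason codes as assumed by this diff (field comments are assumptions):

/* Sketch of the assumed f2fs.h definitions. */
enum {
	CP_UMOUNT,	/* checkpoint at unmount time */
	CP_SYNC,	/* ordinary sync/periodic checkpoint */
	CP_DISCARD,	/* checkpoint driven by FITRIM */
};

struct cp_control {
	int reason;
	__u64 trim_start;	/* FITRIM range, in segments */
	__u64 trim_end;
	__u64 trim_minlen;
	__u64 trimmed;		/* output: blocks discarded */
};

Passing the whole struct lets flush_sit_entries(sbi, cpc) see the trim range, and explains the new cpc->reason != CP_DISCARD escape in write_checkpoint(): a discard-driven checkpoint must run even when the superblock is clean.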
-static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
+static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 {
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
-       nid_t last_nid = 0;
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
+       unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
+       nid_t last_nid = nm_i->next_scan_nid;
        block_t start_blk;
        struct page *cp_page;
        unsigned int data_sum_blocks, orphan_blocks;
        __u32 crc32 = 0;
        void *kaddr;
        int i;
        int cp_payload_blks = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);

--- 42 unchanged lines hidden ---

        /* 2 cp + n data seg summary + orphan inode blocks */
        data_sum_blocks = npages_for_summary_flush(sbi);
        if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
                set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
        else
                clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);

-       orphan_blocks = GET_ORPHAN_BLOCKS(sbi->n_orphans);
+       orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
        ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
                        orphan_blocks);

-       if (is_umount) {
+       if (cpc->reason == CP_UMOUNT) {
                set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
                ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
                                cp_payload_blks + data_sum_blocks +
                                orphan_blocks + NR_CURSEG_NODE_TYPE);
        } else {
                clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
                ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
                                cp_payload_blks + data_sum_blocks +
                                orphan_blocks);
        }

-       if (sbi->n_orphans)
+       if (orphan_num)
                set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
        else
                clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

+       if (sbi->need_fsck)
+               set_ckpt_flags(ckpt, CP_FSCK_FLAG);
+
        /* update SIT/NAT bitmap */
        get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
        get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));

        crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
        *((__le32 *)((unsigned char *)ckpt +
                                le32_to_cpu(ckpt->checksum_offset)))
                                = cpu_to_le32(crc32);

--- 11 unchanged lines hidden ---

                cp_page = grab_meta_page(sbi, start_blk++);
                kaddr = page_address(cp_page);
                memcpy(kaddr, (char *)ckpt + i * F2FS_BLKSIZE,
                                (1 << sbi->log_blocksize));
                set_page_dirty(cp_page);
                f2fs_put_page(cp_page, 1);
        }

-       if (sbi->n_orphans) {
+       if (orphan_num) {
                write_orphan_inodes(sbi, start_blk);
                start_blk += orphan_blocks;
        }

        write_data_summaries(sbi, start_blk);
        start_blk += data_sum_blocks;
-       if (is_umount) {
+       if (cpc->reason == CP_UMOUNT) {
                write_node_summaries(sbi, start_blk);
                start_blk += NR_CURSEG_NODE_TYPE;
        }

        /* writeout checkpoint block */
        cp_page = grab_meta_page(sbi, start_blk);
        kaddr = page_address(cp_page);
        memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));

--- 11 unchanged lines hidden ---

        /* update user_block_counts */
        sbi->last_valid_block_count = sbi->total_valid_block_count;
        sbi->alloc_valid_block_count = 0;

        /* Here, we only have one bio having CP pack */
        sync_meta_pages(sbi, META_FLUSH, LONG_MAX);

+       /* wait for previous submitted meta pages writeback */
+       wait_on_all_pages_writeback(sbi);
+
        release_dirty_inode(sbi);

        if (unlikely(f2fs_cp_error(sbi)))
                return;

        clear_prefree_segments(sbi);
        F2FS_RESET_SB_DIRT(sbi);
 }

 /*
  * We guarantee that this checkpoint procedure will not fail.
  */
-void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
+void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 {
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        unsigned long long ckpt_ver;

-       trace_f2fs_write_checkpoint(sbi->sb, is_umount, "start block_ops");
+       trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");

        mutex_lock(&sbi->cp_mutex);

-       if (!sbi->s_dirty)
+       if (!sbi->s_dirty && cpc->reason != CP_DISCARD)
                goto out;
        if (unlikely(f2fs_cp_error(sbi)))
                goto out;
        if (block_operations(sbi))
                goto out;

-       trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops");
+       trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");

        f2fs_submit_merged_bio(sbi, DATA, WRITE);
        f2fs_submit_merged_bio(sbi, NODE, WRITE);
        f2fs_submit_merged_bio(sbi, META, WRITE);

        /*
         * update checkpoint pack index
         * Increase the version number so that
         * SIT entries and seg summaries are written at correct place
         */
        ckpt_ver = cur_cp_version(ckpt);
        ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

        /* write cached NAT/SIT entries to NAT/SIT area */
        flush_nat_entries(sbi);
-       flush_sit_entries(sbi);
+       flush_sit_entries(sbi, cpc);

        /* unlock all the fs_lock[] in do_checkpoint() */
-       do_checkpoint(sbi, is_umount);
+       do_checkpoint(sbi, cpc);

        unblock_operations(sbi);
        stat_inc_cp_count(sbi->stat_info);
 out:
        mutex_unlock(&sbi->cp_mutex);
-       trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint");
+       trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
 }

 void init_ino_entry_info(struct f2fs_sb_info *sbi)
 {
        int i;

        for (i = 0; i < MAX_INO_ENTRY; i++) {
-               INIT_RADIX_TREE(&sbi->ino_root[i], GFP_ATOMIC);
-               spin_lock_init(&sbi->ino_lock[i]);
-               INIT_LIST_HEAD(&sbi->ino_list[i]);
+               struct inode_management *im = &sbi->im[i];
+
+               INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC);
+               spin_lock_init(&im->ino_lock);
+               INIT_LIST_HEAD(&im->ino_list);
+               im->ino_num = 0;
        }

        /*
         * considering 512 blocks in a segment 8 blocks are needed for cp
         * and log segment summaries. Remaining blocks are used to keep
         * orphan entries with the limitation one reserved segment
         * for cp pack we can have max 1020*504 orphan entries
         */
-       sbi->n_orphans = 0;
        sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
                        NR_CURSEG_TYPE) * F2FS_ORPHANS_PER_BLOCK;
 }

 int __init create_checkpoint_caches(void)
 {
        ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
                        sizeof(struct ino_entry));

--- 16 unchanged lines hidden ---