xref: /openbmc/linux/fs/f2fs/checkpoint.c (revision e9246c87)
/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *ino_entry_slab;
static struct kmem_cache *inode_entry_slab;

/*
 * We guarantee no failure on the returned page.
 */
struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page = NULL;
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	f2fs_wait_on_page_writeback(page, META);
	SetPageUptodate(page);
	return page;
}
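
/*
 * Typical caller pattern (a sketch mirroring do_checkpoint() below, not a
 * separate API): the page comes back locked and uptodate, so the caller
 * fills it, marks it dirty, then releases both the lock and the reference:
 *
 *	page = grab_meta_page(sbi, blk_addr);
 *	memcpy(page_address(page), src, F2FS_BLKSIZE);
 *	set_page_dirty(page);
 *	f2fs_put_page(page, 1);		// unlock and drop the reference
 */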

/*
 * We guarantee no failure on the returned page.
 */
struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	if (PageUptodate(page))
		goto out;

	if (f2fs_submit_page_bio(sbi, page, index,
				READ_SYNC | REQ_META | REQ_PRIO))
		goto repeat;

	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
out:
	return page;
}
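
/*
 * Note on the re-check above: f2fs_submit_page_bio() consumes the page lock,
 * and the read completes with the page unlocked. By the time we take the
 * lock again, the page may have been truncated from the meta mapping, so a
 * stale ->mapping sends us back to repeat with a fresh page.
 */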

struct page *get_meta_page_ra(struct f2fs_sb_info *sbi, pgoff_t index)
{
	bool readahead = false;
	struct page *page;

	page = find_get_page(META_MAPPING(sbi), index);
	if (!page || !PageUptodate(page))
		readahead = true;
	f2fs_put_page(page, 0);

	if (readahead)
		ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR);
	return get_meta_page(sbi, index);
}

static inline block_t get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
{
	switch (type) {
	case META_NAT:
		return NM_I(sbi)->max_nid / NAT_ENTRY_PER_BLOCK;
	case META_SIT:
		return SIT_BLK_CNT(sbi);
	case META_SSA:
	case META_CP:
	case META_POR:
		/*
		 * SSA/CP/POR readahead uses raw block addresses, bounded by
		 * the end of the main area; returning 0 for SSA/CP would
		 * make the blkno >= max_blks check in ra_meta_pages() bail
		 * out immediately and silently defeat the readahead.
		 */
		return MAX_BLKADDR(sbi);
	default:
		BUG();
	}
}

/*
 * Readahead CP/NAT/SIT/SSA/POR pages
 */
int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type)
{
	block_t prev_blk_addr = 0;
	struct page *page;
	block_t blkno = start;
	block_t max_blks = get_max_meta_blks(sbi, type);

	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO
	};

	for (; nrpages-- > 0; blkno++) {
		block_t blk_addr;

		switch (type) {
		case META_NAT:
			/* get nat block addr */
			if (unlikely(blkno >= max_blks))
				blkno = 0;
			blk_addr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK);
			break;
		case META_SIT:
			/* get sit block addr */
			if (unlikely(blkno >= max_blks))
				goto out;
			blk_addr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			if (blkno != start && prev_blk_addr + 1 != blk_addr)
				goto out;
			prev_blk_addr = blk_addr;
			break;
		case META_SSA:
		case META_CP:
		case META_POR:
			if (unlikely(blkno >= max_blks))
				goto out;
			if (unlikely(blkno < SEG0_BLKADDR(sbi)))
				goto out;
			blk_addr = blkno;
			break;
		default:
			BUG();
		}

		page = grab_cache_page(META_MAPPING(sbi), blk_addr);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}

		f2fs_submit_page_mbio(sbi, page, blk_addr, &fio);
		f2fs_put_page(page, 0);
	}
out:
	f2fs_submit_merged_bio(sbi, META, READ);
	return blkno - start;
}
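
/*
 * Usage sketch (taken from recover_orphan_inodes() below): prefetch a run of
 * checkpoint-area blocks before reading them one by one through
 * get_meta_page(), so the reads get merged into large bios:
 *
 *	ra_meta_pages(sbi, start_blk, orphan_blkaddr, META_CP);
 *	for (i = 0; i < orphan_blkaddr; i++)
 *		page = get_meta_page(sbi, start_blk + i);
 */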

static int f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);

	trace_f2fs_writepage(page, META);

	if (unlikely(sbi->por_doing))
		goto redirty_out;
	if (wbc->for_reclaim)
		goto redirty_out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	f2fs_wait_on_page_writeback(page, META);
	write_meta_page(sbi, page);
	dec_page_count(sbi, F2FS_DIRTY_META);
	unlock_page(page);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_meta_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	long diff, written;

	trace_f2fs_writepages(mapping->host, wbc, META);

	/* collect a number of dirty meta pages and write together */
	if (wbc->for_kupdate ||
		get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
		goto skip_write;

	/* hold cp_mutex so this writeback cannot race with checkpoint */
	mutex_lock(&sbi->cp_mutex);
	diff = nr_pages_to_write(sbi, META, wbc);
	written = sync_meta_pages(sbi, META, wbc->nr_to_write);
	mutex_unlock(&sbi->cp_mutex);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
	return 0;
}

long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
						long nr_to_write)
{
	struct address_space *mapping = META_MAPPING(sbi);
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	long nwritten = 0;
	struct writeback_control wbc = {
		.for_reclaim = 0,
	};

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (unlikely(nr_pages == 0))
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			if (f2fs_write_meta_page(page, &wbc)) {
				unlock_page(page);
				break;
			}
			nwritten++;
			if (unlikely(nwritten >= nr_to_write))
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (nwritten)
		f2fs_submit_merged_bio(sbi, type, WRITE);

	return nwritten;
}

static int f2fs_set_meta_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, META);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
		return 1;
	}
	return 0;
}

const struct address_space_operations f2fs_meta_aops = {
	.writepage	= f2fs_write_meta_page,
	.writepages	= f2fs_write_meta_pages,
	.set_page_dirty	= f2fs_set_meta_page_dirty,
};
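
/*
 * These ops are not registered here; elsewhere (f2fs_iget() in inode.c, in
 * this era of the code base) the special meta inode is wired up to them,
 * roughly as follows, so all meta pages flow through the routines above:
 *
 *	if (ino == F2FS_META_INO(sbi))
 *		inode->i_mapping->a_ops = &f2fs_meta_aops;
 */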

static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	struct ino_entry *e;
retry:
	spin_lock(&sbi->ino_lock[type]);

	e = radix_tree_lookup(&sbi->ino_root[type], ino);
	if (!e) {
		e = kmem_cache_alloc(ino_entry_slab, GFP_ATOMIC);
		if (!e) {
			spin_unlock(&sbi->ino_lock[type]);
			goto retry;
		}
		if (radix_tree_insert(&sbi->ino_root[type], ino, e)) {
			spin_unlock(&sbi->ino_lock[type]);
			kmem_cache_free(ino_entry_slab, e);
			goto retry;
		}
		memset(e, 0, sizeof(struct ino_entry));
		e->ino = ino;

		list_add_tail(&e->list, &sbi->ino_list[type]);
	}
	spin_unlock(&sbi->ino_lock[type]);
}
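
/*
 * Note on the retry loop above: the allocation happens under a spinlock,
 * so it must be GFP_ATOMIC and may fail; radix_tree_insert() may likewise
 * fail with -ENOMEM, since the tree was initialized with GFP_ATOMIC (see
 * init_ino_entry_info() below) and cannot sleep to grow its nodes. In both
 * cases we drop the lock and retry, as callers rely on this helper never
 * failing.
 */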

static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	struct ino_entry *e;

	spin_lock(&sbi->ino_lock[type]);
	e = radix_tree_lookup(&sbi->ino_root[type], ino);
	if (e) {
		list_del(&e->list);
		radix_tree_delete(&sbi->ino_root[type], ino);
		if (type == ORPHAN_INO)
			sbi->n_orphans--;
		spin_unlock(&sbi->ino_lock[type]);
		kmem_cache_free(ino_entry_slab, e);
		return;
	}
	spin_unlock(&sbi->ino_lock[type]);
}

void add_dirty_inode(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* add new dirty ino entry into list */
	__add_ino_entry(sbi, ino, type);
}

void remove_dirty_inode(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* remove dirty ino entry from list */
	__remove_ino_entry(sbi, ino, type);
}

/* mode should be APPEND_INO or UPDATE_INO */
bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
{
	struct ino_entry *e;

	spin_lock(&sbi->ino_lock[mode]);
	e = radix_tree_lookup(&sbi->ino_root[mode], ino);
	spin_unlock(&sbi->ino_lock[mode]);
	return e ? true : false;
}

void release_dirty_inode(struct f2fs_sb_info *sbi)
{
	struct ino_entry *e, *tmp;
	int i;

	for (i = APPEND_INO; i <= UPDATE_INO; i++) {
		spin_lock(&sbi->ino_lock[i]);
		list_for_each_entry_safe(e, tmp, &sbi->ino_list[i], list) {
			list_del(&e->list);
			radix_tree_delete(&sbi->ino_root[i], e->ino);
			kmem_cache_free(ino_entry_slab, e);
		}
		spin_unlock(&sbi->ino_lock[i]);
	}
}

int acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
	int err = 0;

	spin_lock(&sbi->ino_lock[ORPHAN_INO]);
	if (unlikely(sbi->n_orphans >= sbi->max_orphans))
		err = -ENOSPC;
	else
		sbi->n_orphans++;
	spin_unlock(&sbi->ino_lock[ORPHAN_INO]);

	return err;
}

void release_orphan_inode(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->ino_lock[ORPHAN_INO]);
	f2fs_bug_on(sbi, sbi->n_orphans == 0);
	sbi->n_orphans--;
	spin_unlock(&sbi->ino_lock[ORPHAN_INO]);
}

void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	/* add new orphan ino entry into list */
	__add_ino_entry(sbi, ino, ORPHAN_INO);
}

void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	/* remove orphan entry from orphan list */
	__remove_ino_entry(sbi, ino, ORPHAN_INO);
}
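
/*
 * Lifecycle sketch for the four helpers above (hedged; the exact call sites
 * live in the namei/inode paths, not in this file):
 *
 *	if (acquire_orphan_inode(sbi))		// reserve a slot, may -ENOSPC
 *		return -ENOSPC;
 *	...
 *	add_orphan_inode(sbi, inode->i_ino);	// record it for the next CP
 *	...
 *	remove_orphan_inode(sbi, inode->i_ino);	// inode finally freed
 *
 * release_orphan_inode() undoes a reservation that was never used.
 */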

static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode = f2fs_iget(sbi->sb, ino);

	f2fs_bug_on(sbi, IS_ERR(inode));
	clear_nlink(inode);

	/* truncate all the data during iput */
	iput(inode);
}

void recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
	block_t start_blk, orphan_blkaddr, i, j;

	if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
		return;

	sbi->por_doing = true;

	start_blk = __start_cp_addr(sbi) + 1 +
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
	orphan_blkaddr = __start_sum_addr(sbi) - 1;

	ra_meta_pages(sbi, start_blk, orphan_blkaddr, META_CP);

	for (i = 0; i < orphan_blkaddr; i++) {
		struct page *page = get_meta_page(sbi, start_blk + i);
		struct f2fs_orphan_block *orphan_blk;

		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);

			recover_orphan_inode(sbi, ino);
		}
		f2fs_put_page(page, 1);
	}
	/* clear Orphan Flag */
	clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
	sbi->por_doing = false;
}

static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
	struct list_head *head;
	struct f2fs_orphan_block *orphan_blk = NULL;
	unsigned int nentries = 0;
	unsigned short index;
	unsigned short orphan_blocks =
			(unsigned short)GET_ORPHAN_BLOCKS(sbi->n_orphans);
	struct page *page = NULL;
	struct ino_entry *orphan = NULL;

	for (index = 0; index < orphan_blocks; index++)
		grab_meta_page(sbi, start_blk + index);

	index = 1;
	spin_lock(&sbi->ino_lock[ORPHAN_INO]);
	head = &sbi->ino_list[ORPHAN_INO];

	/* loop over each orphan inode entry and pack them into orphan blocks */
	list_for_each_entry(orphan, head, list) {
		if (!page) {
			page = find_get_page(META_MAPPING(sbi), start_blk++);
			f2fs_bug_on(sbi, !page);
			orphan_blk =
				(struct f2fs_orphan_block *)page_address(page);
			memset(orphan_blk, 0, sizeof(*orphan_blk));
			f2fs_put_page(page, 0);
		}

		orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);

		if (nentries == F2FS_ORPHANS_PER_BLOCK) {
			/*
			 * the current orphan block is full of 1020 entries,
			 * so we need to flush it and bring the next one
			 * into memory
			 */
			orphan_blk->blk_addr = cpu_to_le16(index);
			orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
			orphan_blk->entry_count = cpu_to_le32(nentries);
			set_page_dirty(page);
			f2fs_put_page(page, 1);
			index++;
			nentries = 0;
			page = NULL;
		}
	}

	if (page) {
		orphan_blk->blk_addr = cpu_to_le16(index);
		orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
		orphan_blk->entry_count = cpu_to_le32(nentries);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}

	spin_unlock(&sbi->ino_lock[ORPHAN_INO]);
}
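
/*
 * On-disk layout being filled above (per struct f2fs_orphan_block in
 * include/linux/f2fs_fs.h): an array of F2FS_ORPHANS_PER_BLOCK (1020)
 * little-endian inode numbers, followed by blk_addr (1-based index of this
 * block within the orphan area), blk_count (total orphan blocks in this CP
 * pack) and entry_count (valid entries in this block).
 * recover_orphan_inodes() above walks exactly this structure back.
 */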

static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
				block_t cp_addr, unsigned long long *version)
{
	struct page *cp_page_1, *cp_page_2 = NULL;
	unsigned long blk_size = sbi->blocksize;
	struct f2fs_checkpoint *cp_block;
	unsigned long long cur_version = 0, pre_version = 0;
	size_t crc_offset;
	__u32 crc = 0;

	/* Read the 1st cp block in this CP pack */
	cp_page_1 = get_meta_page(sbi, cp_addr);

	/* get the version number */
	cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
	crc_offset = le32_to_cpu(cp_block->checksum_offset);
	if (crc_offset >= blk_size)
		goto invalid_cp1;

	crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset)));
	if (!f2fs_crc_valid(crc, cp_block, crc_offset))
		goto invalid_cp1;

	pre_version = cur_cp_version(cp_block);

	/* Read the 2nd cp block in this CP pack */
	cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
	cp_page_2 = get_meta_page(sbi, cp_addr);

	cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
	crc_offset = le32_to_cpu(cp_block->checksum_offset);
	if (crc_offset >= blk_size)
		goto invalid_cp2;

	crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset)));
	if (!f2fs_crc_valid(crc, cp_block, crc_offset))
		goto invalid_cp2;

	cur_version = cur_cp_version(cp_block);

	if (cur_version == pre_version) {
		*version = cur_version;
		f2fs_put_page(cp_page_2, 1);
		return cp_page_1;
	}
invalid_cp2:
	f2fs_put_page(cp_page_2, 1);
invalid_cp1:
	f2fs_put_page(cp_page_1, 1);
	return NULL;
}
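
/*
 * Why the version comparison works: do_checkpoint() stamps the same
 * checkpoint_ver into both the first and the last block of a CP pack. If
 * both copies are CRC-valid and their versions agree, the pack is taken as
 * complete; a pack torn by a crash ends with a stale or corrupt trailing
 * copy and is rejected, so mount falls back to the other CP pack in
 * get_valid_checkpoint() below.
 */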

int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp_block;
	struct f2fs_super_block *fsb = sbi->raw_super;
	struct page *cp1, *cp2, *cur_page;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0;
	unsigned long long cp_start_blk_no;
	unsigned int cp_blks = 1 + le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
	block_t cp_blk_no;
	int i;

	sbi->ckpt = kzalloc(cp_blks * blk_size, GFP_KERNEL);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding a valid cp block involves reading both
	 * sets (cp pack 1 and cp pack 2)
	 */
	cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += ((unsigned long long)1) <<
				le32_to_cpu(fsb->log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version))
			cur_page = cp2;
		else
			cur_page = cp1;
	} else if (cp1) {
		cur_page = cp1;
	} else if (cp2) {
		cur_page = cp2;
	} else {
		goto fail_no_cp;
	}

	cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
	memcpy(sbi->ckpt, cp_block, blk_size);

	if (cp_blks <= 1)
		goto done;

	cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	if (cur_page == cp2)
		cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);

	for (i = 1; i < cp_blks; i++) {
		void *sit_bitmap_ptr;
		unsigned char *ckpt = (unsigned char *)sbi->ckpt;

		cur_page = get_meta_page(sbi, cp_blk_no + i);
		sit_bitmap_ptr = page_address(cur_page);
		memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
		f2fs_put_page(cur_page, 1);
	}
done:
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
	return 0;

fail_no_cp:
	kfree(sbi->ckpt);
	return -EINVAL;
}
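
/*
 * Area layout assumed above (a sketch, not new API): the two CP packs sit
 * in two consecutive segments starting at fsb->cp_blkaddr, and checkpoints
 * alternate between them, so the newer valid pack wins:
 *
 *	pack 1:  cp_blkaddr
 *	pack 2:  cp_blkaddr + (1 << log_blocks_per_seg)
 */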

static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (is_inode_flag_set(F2FS_I(inode), FI_DIRTY_DIR))
		return -EEXIST;

	set_inode_flag(F2FS_I(inode), FI_DIRTY_DIR);
	F2FS_I(inode)->dirty_dir = new;
	list_add_tail(&new->list, &sbi->dir_inode_list);
	stat_inc_dirty_dir(sbi);
	return 0;
}

void update_dirty_page(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dir_inode_entry *new;
	int ret = 0;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode))
		return;

	if (!S_ISDIR(inode->i_mode)) {
		inode_inc_dirty_pages(inode);
		goto out;
	}

	new = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new->inode = inode;
	INIT_LIST_HEAD(&new->list);

	spin_lock(&sbi->dir_inode_lock);
	ret = __add_dirty_inode(inode, new);
	inode_inc_dirty_pages(inode);
	spin_unlock(&sbi->dir_inode_lock);

	if (ret)
		kmem_cache_free(inode_entry_slab, new);
out:
	SetPagePrivate(page);
}

void add_dirty_dir_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dir_inode_entry *new =
			f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	int ret = 0;

	new->inode = inode;
	INIT_LIST_HEAD(&new->list);

	spin_lock(&sbi->dir_inode_lock);
	ret = __add_dirty_inode(inode, new);
	spin_unlock(&sbi->dir_inode_lock);

	if (ret)
		kmem_cache_free(inode_entry_slab, new);
}

void remove_dirty_dir_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dir_inode_entry *entry;

	if (!S_ISDIR(inode->i_mode))
		return;

	spin_lock(&sbi->dir_inode_lock);
	if (get_dirty_pages(inode) ||
			!is_inode_flag_set(F2FS_I(inode), FI_DIRTY_DIR)) {
		spin_unlock(&sbi->dir_inode_lock);
		return;
	}

	entry = F2FS_I(inode)->dirty_dir;
	list_del(&entry->list);
	F2FS_I(inode)->dirty_dir = NULL;
	clear_inode_flag(F2FS_I(inode), FI_DIRTY_DIR);
	stat_dec_dirty_dir(sbi);
	spin_unlock(&sbi->dir_inode_lock);
	kmem_cache_free(inode_entry_slab, entry);

	/* Only from the recovery routine */
	if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) {
		clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT);
		iput(inode);
	}
}

void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
{
	struct list_head *head;
	struct dir_inode_entry *entry;
	struct inode *inode;
retry:
	spin_lock(&sbi->dir_inode_lock);

	head = &sbi->dir_inode_list;
	if (list_empty(head)) {
		spin_unlock(&sbi->dir_inode_lock);
		return;
	}
	entry = list_entry(head->next, struct dir_inode_entry, list);
	inode = igrab(entry->inode);
	spin_unlock(&sbi->dir_inode_lock);
	if (inode) {
		filemap_fdatawrite(inode->i_mapping);
		iput(inode);
	} else {
		/*
		 * igrab() failed, so the inode is being freed; submit the
		 * merged bio, since several of its dentry pages may still
		 * be under writeback.
		 */
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	}
	goto retry;
}

/*
 * Freeze all the FS-operations for checkpoint.
 */
static int block_operations(struct f2fs_sb_info *sbi)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	struct blk_plug plug;
	int err = 0;

	blk_start_plug(&plug);

retry_flush_dents:
	f2fs_lock_all(sbi);
	/* write all the dirty dentry pages */
	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
		f2fs_unlock_all(sbi);
		sync_dirty_dir_inodes(sbi);
		if (unlikely(f2fs_cp_error(sbi))) {
			err = -EIO;
			goto out;
		}
		goto retry_flush_dents;
	}

	/*
	 * POR: we should ensure that there are no dirty node pages
	 * until finishing nat/sit flush.
	 */
retry_flush_nodes:
	down_write(&sbi->node_write);

	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
		up_write(&sbi->node_write);
		sync_node_pages(sbi, 0, &wbc);
		if (unlikely(f2fs_cp_error(sbi))) {
			f2fs_unlock_all(sbi);
			err = -EIO;
			goto out;
		}
		goto retry_flush_nodes;
	}
out:
	blk_finish_plug(&plug);
	return err;
}

static void unblock_operations(struct f2fs_sb_info *sbi)
{
	up_write(&sbi->node_write);
	f2fs_unlock_all(sbi);
}
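
/*
 * Locking sketch for the pair above: block_operations() returns with both
 * f2fs_lock_all() and sbi->node_write held, and unblock_operations() drops
 * them in reverse order, so write_checkpoint() below brackets
 * do_checkpoint() like this:
 *
 *	block_operations(sbi);		// locks taken, dents/nodes clean
 *	do_checkpoint(sbi, cpc);	// write the CP pack
 *	unblock_operations(sbi);	// locks released
 */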

static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!get_pages(sbi, F2FS_WRITEBACK))
			break;

		io_schedule();
	}
	finish_wait(&sbi->cp_wait, &wait);
}

static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	nid_t last_nid = nm_i->next_scan_nid;
	block_t start_blk;
	struct page *cp_page;
	unsigned int data_sum_blocks, orphan_blocks;
	__u32 crc32 = 0;
	void *kaddr;
	int i;
	int cp_payload_blks = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);

	/*
	 * Discarding here avoids wrong roll-forward operations later; it
	 * also uses meta pages, so it must run before the sync_meta_pages()
	 * calls below.
	 */
	discard_next_dnode(sbi, NEXT_FREE_BLKADDR(sbi, curseg));

	/* Flush all the NAT/SIT pages */
	while (get_pages(sbi, F2FS_DIRTY_META)) {
		sync_meta_pages(sbi, META, LONG_MAX);
		if (unlikely(f2fs_cp_error(sbi)))
			return;
	}

	next_free_nid(sbi, &last_nid);

	/*
	 * modify checkpoint
	 * version number is already updated
	 */
	ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
	ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
	ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		ckpt->cur_node_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
		ckpt->cur_node_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
		ckpt->alloc_type[i + CURSEG_HOT_NODE] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		ckpt->cur_data_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
		ckpt->cur_data_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
		ckpt->alloc_type[i + CURSEG_HOT_DATA] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
	}

	ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
	ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
	ckpt->next_free_nid = cpu_to_le32(last_nid);

	/* 2 cp + n data seg summary + orphan inode blocks */
	data_sum_blocks = npages_for_summary_flush(sbi);
	if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
		set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
	else
		clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);

	orphan_blocks = GET_ORPHAN_BLOCKS(sbi->n_orphans);
	ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
			orphan_blocks);

	if (cpc->reason == CP_UMOUNT) {
		set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
		ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
				cp_payload_blks + data_sum_blocks +
				orphan_blocks + NR_CURSEG_NODE_TYPE);
	} else {
		clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
		ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
				cp_payload_blks + data_sum_blocks +
				orphan_blocks);
	}

	if (sbi->n_orphans)
		set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
	else
		clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

	if (sbi->need_fsck)
		set_ckpt_flags(ckpt, CP_FSCK_FLAG);

	/* update SIT/NAT bitmap */
	get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
	get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));

	crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
	*((__le32 *)((unsigned char *)ckpt +
				le32_to_cpu(ckpt->checksum_offset)))
				= cpu_to_le32(crc32);

	start_blk = __start_cp_addr(sbi);

	/* write out checkpoint buffer at block 0 */
	cp_page = grab_meta_page(sbi, start_blk++);
	kaddr = page_address(cp_page);
	memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
	set_page_dirty(cp_page);
	f2fs_put_page(cp_page, 1);

	for (i = 1; i < 1 + cp_payload_blks; i++) {
		cp_page = grab_meta_page(sbi, start_blk++);
		kaddr = page_address(cp_page);
		memcpy(kaddr, (char *)ckpt + i * F2FS_BLKSIZE,
				(1 << sbi->log_blocksize));
		set_page_dirty(cp_page);
		f2fs_put_page(cp_page, 1);
	}

	if (sbi->n_orphans) {
		write_orphan_inodes(sbi, start_blk);
		start_blk += orphan_blocks;
	}

	write_data_summaries(sbi, start_blk);
	start_blk += data_sum_blocks;
	if (cpc->reason == CP_UMOUNT) {
		write_node_summaries(sbi, start_blk);
		start_blk += NR_CURSEG_NODE_TYPE;
	}

	/* write out the trailing checkpoint block */
	cp_page = grab_meta_page(sbi, start_blk);
	kaddr = page_address(cp_page);
	memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
	set_page_dirty(cp_page);
	f2fs_put_page(cp_page, 1);

	/* wait for previously submitted node/meta pages writeback */
	wait_on_all_pages_writeback(sbi);

	if (unlikely(f2fs_cp_error(sbi)))
		return;

	filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LONG_MAX);
	filemap_fdatawait_range(META_MAPPING(sbi), 0, LONG_MAX);

	/* update user_block_counts */
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;

	/* Here, we only have one bio having CP pack */
	sync_meta_pages(sbi, META_FLUSH, LONG_MAX);

	release_dirty_inode(sbi);

	if (unlikely(f2fs_cp_error(sbi)))
		return;

	clear_prefree_segments(sbi);
	F2FS_RESET_SB_DIRT(sbi);
}
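
/*
 * Resulting CP pack layout, derived from the writes above (block counts in
 * parentheses; the orphan and node-summary sections are optional):
 *
 *	+---------------------------+  __start_cp_addr(sbi)
 *	| checkpoint block, copy 1  |  (1)
 *	| cp payload                |  (cp_payload_blks)
 *	| orphan blocks             |  (orphan_blocks, if n_orphans)
 *	| data summaries            |  (data_sum_blocks)
 *	| node summaries            |  (NR_CURSEG_NODE_TYPE, CP_UMOUNT only)
 *	| checkpoint block, copy 2  |  (1)
 *	+---------------------------+
 */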

/*
 * We guarantee that this checkpoint procedure will not fail.
 */
void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_ver;

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");

	mutex_lock(&sbi->cp_mutex);

	if (!sbi->s_dirty && cpc->reason != CP_DISCARD)
		goto out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto out;
	if (block_operations(sbi))
		goto out;

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");

	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);

	/*
	 * update checkpoint pack index
	 * Increase the version number so that
	 * SIT entries and seg summaries are written at the correct place
	 */
	ckpt_ver = cur_cp_version(ckpt);
	ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

	/* write cached NAT/SIT entries to NAT/SIT area */
	flush_nat_entries(sbi);
	flush_sit_entries(sbi, cpc);

	/* write out the checkpoint pack */
	do_checkpoint(sbi, cpc);

	unblock_operations(sbi);
	stat_inc_cp_count(sbi->stat_info);
out:
	mutex_unlock(&sbi->cp_mutex);
	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
}

void init_ino_entry_info(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < MAX_INO_ENTRY; i++) {
		INIT_RADIX_TREE(&sbi->ino_root[i], GFP_ATOMIC);
		spin_lock_init(&sbi->ino_lock[i]);
		INIT_LIST_HEAD(&sbi->ino_list[i]);
	}

	/*
	 * One segment is reserved for the cp pack. Of its blocks_per_seg
	 * blocks (512 by default), F2FS_CP_PACKS (2) hold the checkpoint
	 * copies and NR_CURSEG_TYPE (6) hold the log segment summaries;
	 * the rest keep orphan entries, so with default geometry we can
	 * have at most (512 - 8) * 1020 = 504 * 1020 orphan entries.
	 */
	sbi->n_orphans = 0;
	sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
			NR_CURSEG_TYPE) * F2FS_ORPHANS_PER_BLOCK;
}

int __init create_checkpoint_caches(void)
{
	ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
			sizeof(struct ino_entry));
	if (!ino_entry_slab)
		return -ENOMEM;
	inode_entry_slab = f2fs_kmem_cache_create("f2fs_dirty_dir_entry",
			sizeof(struct dir_inode_entry));
	if (!inode_entry_slab) {
		kmem_cache_destroy(ino_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_checkpoint_caches(void)
{
	kmem_cache_destroy(ino_entry_slab);
	kmem_cache_destroy(inode_entry_slab);
}