1 /*
2  * fs/f2fs/segment.c
3  *
4  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5  *             http://www.samsung.com/
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #include <linux/fs.h>
12 #include <linux/f2fs_fs.h>
13 #include <linux/bio.h>
14 #include <linux/blkdev.h>
15 #include <linux/prefetch.h>
16 #include <linux/kthread.h>
17 #include <linux/vmalloc.h>
18 #include <linux/swap.h>
19 
20 #include "f2fs.h"
21 #include "segment.h"
22 #include "node.h"
23 #include <trace/events/f2fs.h>
24 
25 #define __reverse_ffz(x) __reverse_ffs(~(x))
26 
27 static struct kmem_cache *discard_entry_slab;
28 static struct kmem_cache *sit_entry_set_slab;
29 static struct kmem_cache *inmem_entry_slab;
30 
31 /*
32  * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
33  * MSB and LSB are reversed in a byte by f2fs_set_bit.
34  */
35 static inline unsigned long __reverse_ffs(unsigned long word)
36 {
37 	int num = 0;
38 
39 #if BITS_PER_LONG == 64
40 	if ((word & 0xffffffff) == 0) {
41 		num += 32;
42 		word >>= 32;
43 	}
44 #endif
45 	if ((word & 0xffff) == 0) {
46 		num += 16;
47 		word >>= 16;
48 	}
49 	if ((word & 0xff) == 0) {
50 		num += 8;
51 		word >>= 8;
52 	}
53 	if ((word & 0xf0) == 0)
54 		num += 4;
55 	else
56 		word >>= 4;
57 	if ((word & 0xc) == 0)
58 		num += 2;
59 	else
60 		word >>= 2;
61 	if ((word & 0x2) == 0)
62 		num += 1;
63 	return num;
64 }
65 
66 /*
67  * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
68  * f2fs_set_bit makes MSB and LSB reversed in a byte.
69  * Example:
70  *                             LSB <--> MSB
71  *   f2fs_set_bit(0, bitmap) => 0000 0001
72  *   f2fs_set_bit(7, bitmap) => 1000 0000
73  */
74 static unsigned long __find_rev_next_bit(const unsigned long *addr,
75 			unsigned long size, unsigned long offset)
76 {
77 	const unsigned long *p = addr + BIT_WORD(offset);
78 	unsigned long result = offset & ~(BITS_PER_LONG - 1);
79 	unsigned long tmp;
80 	unsigned long mask, submask;
81 	unsigned long quot, rest;
82 
83 	if (offset >= size)
84 		return size;
85 
86 	size -= result;
87 	offset %= BITS_PER_LONG;
88 	if (!offset)
89 		goto aligned;
90 
91 	tmp = *(p++);
92 	quot = (offset >> 3) << 3;
93 	rest = offset & 0x7;
94 	mask = ~0UL << quot;
95 	submask = (unsigned char)(0xff << rest) >> rest;
96 	submask <<= quot;
97 	mask &= submask;
98 	tmp &= mask;
99 	if (size < BITS_PER_LONG)
100 		goto found_first;
101 	if (tmp)
102 		goto found_middle;
103 
104 	size -= BITS_PER_LONG;
105 	result += BITS_PER_LONG;
106 aligned:
107 	while (size & ~(BITS_PER_LONG-1)) {
108 		tmp = *(p++);
109 		if (tmp)
110 			goto found_middle;
111 		result += BITS_PER_LONG;
112 		size -= BITS_PER_LONG;
113 	}
114 	if (!size)
115 		return result;
116 	tmp = *p;
117 found_first:
118 	tmp &= (~0UL >> (BITS_PER_LONG - size));
119 	if (tmp == 0UL)		/* Are any bits set? */
120 		return result + size;   /* Nope. */
121 found_middle:
122 	return result + __reverse_ffs(tmp);
123 }
124 
125 static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
126 			unsigned long size, unsigned long offset)
127 {
128 	const unsigned long *p = addr + BIT_WORD(offset);
129 	unsigned long result = offset & ~(BITS_PER_LONG - 1);
130 	unsigned long tmp;
131 	unsigned long mask, submask;
132 	unsigned long quot, rest;
133 
134 	if (offset >= size)
135 		return size;
136 
137 	size -= result;
138 	offset %= BITS_PER_LONG;
139 	if (!offset)
140 		goto aligned;
141 
142 	tmp = *(p++);
143 	quot = (offset >> 3) << 3;
144 	rest = offset & 0x7;
145 	mask = ~(~0UL << quot);
146 	submask = (unsigned char)~((unsigned char)(0xff << rest) >> rest);
147 	submask <<= quot;
148 	mask += submask;
149 	tmp |= mask;
150 	if (size < BITS_PER_LONG)
151 		goto found_first;
152 	if (~tmp)
153 		goto found_middle;
154 
155 	size -= BITS_PER_LONG;
156 	result += BITS_PER_LONG;
157 aligned:
158 	while (size & ~(BITS_PER_LONG - 1)) {
159 		tmp = *(p++);
160 		if (~tmp)
161 			goto found_middle;
162 		result += BITS_PER_LONG;
163 		size -= BITS_PER_LONG;
164 	}
165 	if (!size)
166 		return result;
167 	tmp = *p;
168 
169 found_first:
170 	tmp |= ~0UL << size;
171 	if (tmp == ~0UL)        /* Are any bits zero? */
172 		return result + size;   /* Nope. */
173 found_middle:
174 	return result + __reverse_ffz(tmp);
175 }
176 
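/*
 * Register an atomic-write page: take a page reference and queue it on
 * the per-inode in-memory page list.
 */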
177 void register_inmem_page(struct inode *inode, struct page *page)
178 {
179 	struct f2fs_inode_info *fi = F2FS_I(inode);
180 	struct inmem_pages *new;
181 
182 	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
183 
184 	/* set up the list entry for the atomic page */
185 	new->page = page;
186 	INIT_LIST_HEAD(&new->list);
187 
188 	/* increase reference count with clean state */
189 	mutex_lock(&fi->inmem_lock);
190 	get_page(page);
191 	list_add_tail(&new->list, &fi->inmem_pages);
192 	mutex_unlock(&fi->inmem_lock);
193 }
194 
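/*
 * Write back every registered atomic page of this inode, or simply drop
 * them all when @abort is true.
 */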
195 void commit_inmem_pages(struct inode *inode, bool abort)
196 {
197 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
198 	struct f2fs_inode_info *fi = F2FS_I(inode);
199 	struct inmem_pages *cur, *tmp;
200 	bool submit_bio = false;
201 	struct f2fs_io_info fio = {
202 		.type = DATA,
203 		.rw = WRITE_SYNC,
204 	};
205 
206 	f2fs_balance_fs(sbi);
207 	f2fs_lock_op(sbi);
208 
209 	mutex_lock(&fi->inmem_lock);
210 	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
211 		lock_page(cur->page);
212 		if (!abort && cur->page->mapping == inode->i_mapping) {
213 			f2fs_wait_on_page_writeback(cur->page, DATA);
214 			if (clear_page_dirty_for_io(cur->page))
215 				inode_dec_dirty_pages(inode);
216 			do_write_data_page(cur->page, &fio);
217 			submit_bio = true;
218 		}
219 		f2fs_put_page(cur->page, 1);
220 		list_del(&cur->list);
221 		kmem_cache_free(inmem_entry_slab, cur);
222 	}
223 	if (submit_bio)
224 		f2fs_submit_merged_bio(sbi, DATA, WRITE);
225 	mutex_unlock(&fi->inmem_lock);
226 
227 	filemap_fdatawait_range(inode->i_mapping, 0, LLONG_MAX);
228 	f2fs_unlock_op(sbi);
229 }
230 
231 /*
232  * This function balances dirty node and dentry pages.
233  * In addition, it controls garbage collection.
234  */
235 void f2fs_balance_fs(struct f2fs_sb_info *sbi)
236 {
237 	/*
238 	 * We should do GC, or end up doing a checkpoint, if there are too many
239 	 * dirty dir/node pages without enough free segments.
240 	 */
241 	if (has_not_enough_free_secs(sbi, 0)) {
242 		mutex_lock(&sbi->gc_mutex);
243 		f2fs_gc(sbi);
244 	}
245 }
246 
247 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
248 {
249 	/* check the # of cached NAT entries and prefree segments */
250 	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
251 				excess_prefree_segs(sbi))
252 		f2fs_sync_fs(sbi->sb, true);
253 }
254 
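/*
 * Kernel thread that batches queued flush commands and completes each
 * batch with a single WRITE_FLUSH bio.
 */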
255 static int issue_flush_thread(void *data)
256 {
257 	struct f2fs_sb_info *sbi = data;
258 	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
259 	wait_queue_head_t *q = &fcc->flush_wait_queue;
260 repeat:
261 	if (kthread_should_stop())
262 		return 0;
263 
264 	if (!llist_empty(&fcc->issue_list)) {
265 		struct bio *bio = bio_alloc(GFP_NOIO, 0);
266 		struct flush_cmd *cmd, *next;
267 		int ret;
268 
269 		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
270 		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
271 
272 		bio->bi_bdev = sbi->sb->s_bdev;
273 		ret = submit_bio_wait(WRITE_FLUSH, bio);
274 
275 		llist_for_each_entry_safe(cmd, next,
276 					  fcc->dispatch_list, llnode) {
277 			cmd->ret = ret;
278 			complete(&cmd->wait);
279 		}
280 		bio_put(bio);
281 		fcc->dispatch_list = NULL;
282 	}
283 
284 	wait_event_interruptible(*q,
285 		kthread_should_stop() || !llist_empty(&fcc->issue_list));
286 	goto repeat;
287 }
288 
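/*
 * Issue a cache flush. With FLUSH_MERGE the request is queued so that
 * issue_flush_thread can merge it with other pending flushes.
 */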
289 int f2fs_issue_flush(struct f2fs_sb_info *sbi)
290 {
291 	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
292 	struct flush_cmd cmd;
293 
294 	trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER),
295 					test_opt(sbi, FLUSH_MERGE));
296 
297 	if (test_opt(sbi, NOBARRIER))
298 		return 0;
299 
300 	if (!test_opt(sbi, FLUSH_MERGE))
301 		return blkdev_issue_flush(sbi->sb->s_bdev, GFP_KERNEL, NULL);
302 
303 	init_completion(&cmd.wait);
304 
305 	llist_add(&cmd.llnode, &fcc->issue_list);
306 
307 	if (!fcc->dispatch_list)
308 		wake_up(&fcc->flush_wait_queue);
309 
310 	wait_for_completion(&cmd.wait);
311 
312 	return cmd.ret;
313 }
314 
315 int create_flush_cmd_control(struct f2fs_sb_info *sbi)
316 {
317 	dev_t dev = sbi->sb->s_bdev->bd_dev;
318 	struct flush_cmd_control *fcc;
319 	int err = 0;
320 
321 	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
322 	if (!fcc)
323 		return -ENOMEM;
324 	init_waitqueue_head(&fcc->flush_wait_queue);
325 	init_llist_head(&fcc->issue_list);
326 	SM_I(sbi)->cmd_control_info = fcc;
327 	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
328 				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
329 	if (IS_ERR(fcc->f2fs_issue_flush)) {
330 		err = PTR_ERR(fcc->f2fs_issue_flush);
331 		kfree(fcc);
332 		SM_I(sbi)->cmd_control_info = NULL;
333 		return err;
334 	}
335 
336 	return err;
337 }
338 
339 void destroy_flush_cmd_control(struct f2fs_sb_info *sbi)
340 {
341 	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
342 
343 	if (fcc && fcc->f2fs_issue_flush)
344 		kthread_stop(fcc->f2fs_issue_flush);
345 	kfree(fcc);
346 	SM_I(sbi)->cmd_control_info = NULL;
347 }
348 
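/* caller must hold seglist_lock */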
349 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
350 		enum dirty_type dirty_type)
351 {
352 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
353 
354 	/* need not be added */
355 	if (IS_CURSEG(sbi, segno))
356 		return;
357 
358 	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
359 		dirty_i->nr_dirty[dirty_type]++;
360 
361 	if (dirty_type == DIRTY) {
362 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
363 		enum dirty_type t = sentry->type;
364 
365 		if (unlikely(t >= DIRTY)) {
366 			f2fs_bug_on(sbi, 1);
367 			return;
368 		}
369 		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
370 			dirty_i->nr_dirty[t]++;
371 	}
372 }
373 
374 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
375 		enum dirty_type dirty_type)
376 {
377 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
378 
379 	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
380 		dirty_i->nr_dirty[dirty_type]--;
381 
382 	if (dirty_type == DIRTY) {
383 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
384 		enum dirty_type t = sentry->type;
385 
386 		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
387 			dirty_i->nr_dirty[t]--;
388 
389 		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
390 			clear_bit(GET_SECNO(sbi, segno),
391 						dirty_i->victim_secmap);
392 	}
393 }
394 
395 /*
396  * Errors such as -ENOMEM should never occur here:
397  * adding a dirty entry to the seglist is not a critical operation.
398  * If a given segment is one of the current working segments, it won't be added.
399  */
400 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
401 {
402 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
403 	unsigned short valid_blocks;
404 
405 	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
406 		return;
407 
408 	mutex_lock(&dirty_i->seglist_lock);
409 
410 	valid_blocks = get_valid_blocks(sbi, segno, 0);
411 
412 	if (valid_blocks == 0) {
413 		__locate_dirty_segment(sbi, segno, PRE);
414 		__remove_dirty_segment(sbi, segno, DIRTY);
415 	} else if (valid_blocks < sbi->blocks_per_seg) {
416 		__locate_dirty_segment(sbi, segno, DIRTY);
417 	} else {
418 		/* Recovery routine with SSR needs this */
419 		__remove_dirty_segment(sbi, segno, DIRTY);
420 	}
421 
422 	mutex_unlock(&dirty_i->seglist_lock);
423 }
424 
425 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
426 				block_t blkstart, block_t blklen)
427 {
428 	sector_t start = SECTOR_FROM_BLOCK(blkstart);
429 	sector_t len = SECTOR_FROM_BLOCK(blklen);
430 	trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
431 	return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
432 }
433 
434 void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
435 {
436 	if (f2fs_issue_discard(sbi, blkaddr, 1)) {
437 		struct page *page = grab_meta_page(sbi, blkaddr);
438 		/* zero-filled page */
439 		set_page_dirty(page);
440 		f2fs_put_page(page, 1);
441 	}
442 }
443 
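/*
 * Collect discard candidates for the segment at cpc->trim_start, i.e.
 * blocks that were valid at the last checkpoint but are invalid now.
 */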
444 static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
445 {
446 	struct list_head *head = &SM_I(sbi)->discard_list;
447 	struct discard_entry *new;
448 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
449 	int max_blocks = sbi->blocks_per_seg;
450 	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
451 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
452 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
453 	unsigned long dmap[entries];
454 	unsigned int start = 0, end = -1;
455 	bool force = (cpc->reason == CP_DISCARD);
456 	int i;
457 
458 	if (!force && !test_opt(sbi, DISCARD))
459 		return;
460 
461 	if (force && !se->valid_blocks) {
462 		struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
463 		/*
464 		 * if this segment is registered in the prefree list, then
465 		 * we should skip adding a discard candidate, and let the
466 		 * checkpoint do that later.
467 		 */
468 		mutex_lock(&dirty_i->seglist_lock);
469 		if (test_bit(cpc->trim_start, dirty_i->dirty_segmap[PRE])) {
470 			mutex_unlock(&dirty_i->seglist_lock);
471 			cpc->trimmed += sbi->blocks_per_seg;
472 			return;
473 		}
474 		mutex_unlock(&dirty_i->seglist_lock);
475 
476 		new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
477 		INIT_LIST_HEAD(&new->list);
478 		new->blkaddr = START_BLOCK(sbi, cpc->trim_start);
479 		new->len = sbi->blocks_per_seg;
480 		list_add_tail(&new->list, head);
481 		SM_I(sbi)->nr_discards += sbi->blocks_per_seg;
482 		cpc->trimmed += sbi->blocks_per_seg;
483 		return;
484 	}
485 
486 	/* zero block will be discarded through the prefree list */
487 	if (!se->valid_blocks || se->valid_blocks == max_blocks)
488 		return;
489 
490 	/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
491 	for (i = 0; i < entries; i++)
492 		dmap[i] = (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
493 
494 	while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
495 		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
496 		if (start >= max_blocks)
497 			break;
498 
499 		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
500 
501 		if (end - start < cpc->trim_minlen)
502 			continue;
503 
504 		new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
505 		INIT_LIST_HEAD(&new->list);
506 		new->blkaddr = START_BLOCK(sbi, cpc->trim_start) + start;
507 		new->len = end - start;
508 		cpc->trimmed += end - start;
509 
510 		list_add_tail(&new->list, head);
511 		SM_I(sbi)->nr_discards += end - start;
512 	}
513 }
514 
515 void release_discard_addrs(struct f2fs_sb_info *sbi)
516 {
517 	struct list_head *head = &(SM_I(sbi)->discard_list);
518 	struct discard_entry *entry, *this;
519 
520 	/* drop caches */
521 	list_for_each_entry_safe(entry, this, head, list) {
522 		list_del(&entry->list);
523 		kmem_cache_free(discard_entry_slab, entry);
524 	}
525 }
526 
527 /*
528  * clear_prefree_segments should be called after the checkpoint is done.
529  */
530 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
531 {
532 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
533 	unsigned int segno;
534 
535 	mutex_lock(&dirty_i->seglist_lock);
536 	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
537 		__set_test_and_free(sbi, segno);
538 	mutex_unlock(&dirty_i->seglist_lock);
539 }
540 
541 void clear_prefree_segments(struct f2fs_sb_info *sbi)
542 {
543 	struct list_head *head = &(SM_I(sbi)->discard_list);
544 	struct discard_entry *entry, *this;
545 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
546 	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
547 	unsigned int start = 0, end = -1;
548 
549 	mutex_lock(&dirty_i->seglist_lock);
550 
551 	while (1) {
552 		int i;
553 		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
554 		if (start >= MAIN_SEGS(sbi))
555 			break;
556 		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
557 								start + 1);
558 
559 		for (i = start; i < end; i++)
560 			clear_bit(i, prefree_map);
561 
562 		dirty_i->nr_dirty[PRE] -= end - start;
563 
564 		if (!test_opt(sbi, DISCARD))
565 			continue;
566 
567 		f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
568 				(end - start) << sbi->log_blocks_per_seg);
569 	}
570 	mutex_unlock(&dirty_i->seglist_lock);
571 
572 	/* send small discards */
573 	list_for_each_entry_safe(entry, this, head, list) {
574 		f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
575 		list_del(&entry->list);
576 		SM_I(sbi)->nr_discards -= entry->len;
577 		kmem_cache_free(discard_entry_slab, entry);
578 	}
579 }
580 
581 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
582 {
583 	struct sit_info *sit_i = SIT_I(sbi);
584 
585 	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
586 		sit_i->dirty_sentries++;
587 		return false;
588 	}
589 
590 	return true;
591 }
592 
593 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
594 					unsigned int segno, int modified)
595 {
596 	struct seg_entry *se = get_seg_entry(sbi, segno);
597 	se->type = type;
598 	if (modified)
599 		__mark_sit_entry_dirty(sbi, segno);
600 }
601 
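/*
 * Apply @del to the valid block count of the segment containing @blkaddr,
 * update its validity bitmaps and mark the SIT entry dirty.
 */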
602 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
603 {
604 	struct seg_entry *se;
605 	unsigned int segno, offset;
606 	long int new_vblocks;
607 
608 	segno = GET_SEGNO(sbi, blkaddr);
609 
610 	se = get_seg_entry(sbi, segno);
611 	new_vblocks = se->valid_blocks + del;
612 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
613 
614 	f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
615 				(new_vblocks > sbi->blocks_per_seg)));
616 
617 	se->valid_blocks = new_vblocks;
618 	se->mtime = get_mtime(sbi);
619 	SIT_I(sbi)->max_mtime = se->mtime;
620 
621 	/* Update valid block bitmap */
622 	if (del > 0) {
623 		if (f2fs_set_bit(offset, se->cur_valid_map))
624 			f2fs_bug_on(sbi, 1);
625 	} else {
626 		if (!f2fs_clear_bit(offset, se->cur_valid_map))
627 			f2fs_bug_on(sbi, 1);
628 	}
629 	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
630 		se->ckpt_valid_blocks += del;
631 
632 	__mark_sit_entry_dirty(sbi, segno);
633 
634 	/* update total number of valid blocks to be written in ckpt area */
635 	SIT_I(sbi)->written_valid_blocks += del;
636 
637 	if (sbi->segs_per_sec > 1)
638 		get_sec_entry(sbi, segno)->valid_blocks += del;
639 }
640 
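/*
 * Account the newly written block at @new, invalidate @old, and update
 * the dirty seglists for both segments.
 */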
641 void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
642 {
643 	update_sit_entry(sbi, new, 1);
644 	if (GET_SEGNO(sbi, old) != NULL_SEGNO)
645 		update_sit_entry(sbi, old, -1);
646 
647 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
648 	locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
649 }
650 
651 void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
652 {
653 	unsigned int segno = GET_SEGNO(sbi, addr);
654 	struct sit_info *sit_i = SIT_I(sbi);
655 
656 	f2fs_bug_on(sbi, addr == NULL_ADDR);
657 	if (addr == NEW_ADDR)
658 		return;
659 
660 	/* add it into sit main buffer */
661 	mutex_lock(&sit_i->sentry_lock);
662 
663 	update_sit_entry(sbi, addr, -1);
664 
665 	/* add it into dirty seglist */
666 	locate_dirty_segment(sbi, segno);
667 
668 	mutex_unlock(&sit_i->sentry_lock);
669 }
670 
671 /*
672  * This function must be called with curseg_mutex held.
673  */
674 static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
675 					struct f2fs_summary *sum)
676 {
677 	struct curseg_info *curseg = CURSEG_I(sbi, type);
678 	void *addr = curseg->sum_blk;
679 	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
680 	memcpy(addr, sum, sizeof(struct f2fs_summary));
681 }
682 
683 /*
684  * Calculate the number of current summary pages for writing
685  */
686 int npages_for_summary_flush(struct f2fs_sb_info *sbi)
687 {
688 	int valid_sum_count = 0;
689 	int i, sum_in_page;
690 
691 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
692 		if (sbi->ckpt->alloc_type[i] == SSR)
693 			valid_sum_count += sbi->blocks_per_seg;
694 		else
695 			valid_sum_count += curseg_blkoff(sbi, i);
696 	}
697 
698 	sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
699 			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
700 	if (valid_sum_count <= sum_in_page)
701 		return 1;
702 	else if ((valid_sum_count - sum_in_page) <=
703 		(PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
704 		return 2;
705 	return 3;
706 }
707 
708 /*
709  * Caller should put this summary page
710  */
711 struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
712 {
713 	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
714 }
715 
716 static void write_sum_page(struct f2fs_sb_info *sbi,
717 			struct f2fs_summary_block *sum_blk, block_t blk_addr)
718 {
719 	struct page *page = grab_meta_page(sbi, blk_addr);
720 	void *kaddr = page_address(page);
721 	memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
722 	set_page_dirty(page);
723 	f2fs_put_page(page, 1);
724 }
725 
726 static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
727 {
728 	struct curseg_info *curseg = CURSEG_I(sbi, type);
729 	unsigned int segno = curseg->segno + 1;
730 	struct free_segmap_info *free_i = FREE_I(sbi);
731 
732 	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
733 		return !test_bit(segno, free_i->free_segmap);
734 	return 0;
735 }
736 
737 /*
738  * Find a new segment in the free segment bitmap in the right allocation order.
739  * This function must succeed; otherwise it is a BUG.
740  */
741 static void get_new_segment(struct f2fs_sb_info *sbi,
742 			unsigned int *newseg, bool new_sec, int dir)
743 {
744 	struct free_segmap_info *free_i = FREE_I(sbi);
745 	unsigned int segno, secno, zoneno;
746 	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
747 	unsigned int hint = *newseg / sbi->segs_per_sec;
748 	unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
749 	unsigned int left_start = hint;
750 	bool init = true;
751 	int go_left = 0;
752 	int i;
753 
754 	write_lock(&free_i->segmap_lock);
755 
756 	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
757 		segno = find_next_zero_bit(free_i->free_segmap,
758 					MAIN_SEGS(sbi), *newseg + 1);
759 		if (segno - *newseg < sbi->segs_per_sec -
760 					(*newseg % sbi->segs_per_sec))
761 			goto got_it;
762 	}
763 find_other_zone:
764 	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
765 	if (secno >= MAIN_SECS(sbi)) {
766 		if (dir == ALLOC_RIGHT) {
767 			secno = find_next_zero_bit(free_i->free_secmap,
768 							MAIN_SECS(sbi), 0);
769 			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
770 		} else {
771 			go_left = 1;
772 			left_start = hint - 1;
773 		}
774 	}
775 	if (go_left == 0)
776 		goto skip_left;
777 
778 	while (test_bit(left_start, free_i->free_secmap)) {
779 		if (left_start > 0) {
780 			left_start--;
781 			continue;
782 		}
783 		left_start = find_next_zero_bit(free_i->free_secmap,
784 							MAIN_SECS(sbi), 0);
785 		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
786 		break;
787 	}
788 	secno = left_start;
789 skip_left:
790 	hint = secno;
791 	segno = secno * sbi->segs_per_sec;
792 	zoneno = secno / sbi->secs_per_zone;
793 
794 	/* give up on finding another zone */
795 	if (!init)
796 		goto got_it;
797 	if (sbi->secs_per_zone == 1)
798 		goto got_it;
799 	if (zoneno == old_zoneno)
800 		goto got_it;
801 	if (dir == ALLOC_LEFT) {
802 		if (!go_left && zoneno + 1 >= total_zones)
803 			goto got_it;
804 		if (go_left && zoneno == 0)
805 			goto got_it;
806 	}
807 	for (i = 0; i < NR_CURSEG_TYPE; i++)
808 		if (CURSEG_I(sbi, i)->zone == zoneno)
809 			break;
810 
811 	if (i < NR_CURSEG_TYPE) {
812 		/* zone is in use, try another */
813 		if (go_left)
814 			hint = zoneno * sbi->secs_per_zone - 1;
815 		else if (zoneno + 1 >= total_zones)
816 			hint = 0;
817 		else
818 			hint = (zoneno + 1) * sbi->secs_per_zone;
819 		init = false;
820 		goto find_other_zone;
821 	}
822 got_it:
823 	/* set it as dirty segment in free segmap */
824 	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
825 	__set_inuse(sbi, segno);
826 	*newseg = segno;
827 	write_unlock(&free_i->segmap_lock);
828 }
829 
830 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
831 {
832 	struct curseg_info *curseg = CURSEG_I(sbi, type);
833 	struct summary_footer *sum_footer;
834 
835 	curseg->segno = curseg->next_segno;
836 	curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
837 	curseg->next_blkoff = 0;
838 	curseg->next_segno = NULL_SEGNO;
839 
840 	sum_footer = &(curseg->sum_blk->footer);
841 	memset(sum_footer, 0, sizeof(struct summary_footer));
842 	if (IS_DATASEG(type))
843 		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
844 	if (IS_NODESEG(type))
845 		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
846 	__set_sit_entry_type(sbi, type, curseg->segno, modified);
847 }
848 
849 /*
850  * Allocate a current working segment.
851  * This function always allocates a free segment in LFS manner.
852  */
853 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
854 {
855 	struct curseg_info *curseg = CURSEG_I(sbi, type);
856 	unsigned int segno = curseg->segno;
857 	int dir = ALLOC_LEFT;
858 
859 	write_sum_page(sbi, curseg->sum_blk,
860 				GET_SUM_BLOCK(sbi, segno));
861 	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
862 		dir = ALLOC_RIGHT;
863 
864 	if (test_opt(sbi, NOHEAP))
865 		dir = ALLOC_RIGHT;
866 
867 	get_new_segment(sbi, &segno, new_sec, dir);
868 	curseg->next_segno = segno;
869 	reset_curseg(sbi, type, 1);
870 	curseg->alloc_type = LFS;
871 }
872 
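/*
 * For SSR allocation: find the next free block offset in @seg by ORing
 * the current and checkpointed validity bitmaps.
 */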
873 static void __next_free_blkoff(struct f2fs_sb_info *sbi,
874 			struct curseg_info *seg, block_t start)
875 {
876 	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
877 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
878 	unsigned long target_map[entries];
879 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
880 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
881 	int i, pos;
882 
883 	for (i = 0; i < entries; i++)
884 		target_map[i] = ckpt_map[i] | cur_map[i];
885 
886 	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
887 
888 	seg->next_blkoff = pos;
889 }
890 
891 /*
892  * If a segment is written in LFS manner, the next block offset is simply obtained
893  * by increasing the current block offset. However, if a segment is written in
894  * SSR manner, the next block offset is obtained by calling __next_free_blkoff.
895  */
896 static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
897 				struct curseg_info *seg)
898 {
899 	if (seg->alloc_type == SSR)
900 		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
901 	else
902 		seg->next_blkoff++;
903 }
904 
905 /*
906  * This function always allocates a used segment (from the dirty seglist) in SSR
907  * manner, so it must recover the existing segment information of the valid blocks.
908  */
909 static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
910 {
911 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
912 	struct curseg_info *curseg = CURSEG_I(sbi, type);
913 	unsigned int new_segno = curseg->next_segno;
914 	struct f2fs_summary_block *sum_node;
915 	struct page *sum_page;
916 
917 	write_sum_page(sbi, curseg->sum_blk,
918 				GET_SUM_BLOCK(sbi, curseg->segno));
919 	__set_test_and_inuse(sbi, new_segno);
920 
921 	mutex_lock(&dirty_i->seglist_lock);
922 	__remove_dirty_segment(sbi, new_segno, PRE);
923 	__remove_dirty_segment(sbi, new_segno, DIRTY);
924 	mutex_unlock(&dirty_i->seglist_lock);
925 
926 	reset_curseg(sbi, type, 1);
927 	curseg->alloc_type = SSR;
928 	__next_free_blkoff(sbi, curseg, 0);
929 
930 	if (reuse) {
931 		sum_page = get_sum_page(sbi, new_segno);
932 		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
933 		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
934 		f2fs_put_page(sum_page, 1);
935 	}
936 }
937 
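/*
 * Pick a victim segment to reuse in SSR manner; returns 1 and stores the
 * victim in curseg->next_segno on success.
 */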
938 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
939 {
940 	struct curseg_info *curseg = CURSEG_I(sbi, type);
941 	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
942 
943 	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
944 		return v_ops->get_victim(sbi,
945 				&(curseg)->next_segno, BG_GC, type, SSR);
946 
947 	/* For data segments, let's do SSR more intensively */
948 	for (; type >= CURSEG_HOT_DATA; type--)
949 		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
950 						BG_GC, type, SSR))
951 			return 1;
952 	return 0;
953 }
954 
955 /*
956  * Flush out the current segment and replace it with a new segment.
957  * This function must succeed; otherwise it is a BUG.
958  */
959 static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
960 						int type, bool force)
961 {
962 	struct curseg_info *curseg = CURSEG_I(sbi, type);
963 
964 	if (force)
965 		new_curseg(sbi, type, true);
966 	else if (type == CURSEG_WARM_NODE)
967 		new_curseg(sbi, type, false);
968 	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
969 		new_curseg(sbi, type, false);
970 	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
971 		change_curseg(sbi, type, true);
972 	else
973 		new_curseg(sbi, type, false);
974 
975 	stat_inc_seg_type(sbi, curseg);
976 }
977 
978 void allocate_new_segments(struct f2fs_sb_info *sbi)
979 {
980 	struct curseg_info *curseg;
981 	unsigned int old_curseg;
982 	int i;
983 
984 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
985 		curseg = CURSEG_I(sbi, i);
986 		old_curseg = curseg->segno;
987 		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
988 		locate_dirty_segment(sbi, old_curseg);
989 	}
990 }
991 
992 static const struct segment_allocation default_salloc_ops = {
993 	.allocate_segment = allocate_segment_by_default,
994 };
995 
996 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
997 {
998 	__u64 start = range->start >> sbi->log_blocksize;
999 	__u64 end = start + (range->len >> sbi->log_blocksize) - 1;
1000 	unsigned int start_segno, end_segno;
1001 	struct cp_control cpc;
1002 
1003 	if (range->minlen > SEGMENT_SIZE(sbi) || start >= MAX_BLKADDR(sbi) ||
1004 						range->len < sbi->blocksize)
1005 		return -EINVAL;
1006 
1007 	if (end <= MAIN_BLKADDR(sbi))
1008 		goto out;
1009 
1010 	/* start/end segment number in main_area */
1011 	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
1012 	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
1013 						GET_SEGNO(sbi, end);
1014 	cpc.reason = CP_DISCARD;
1015 	cpc.trim_start = start_segno;
1016 	cpc.trim_end = end_segno;
1017 	cpc.trim_minlen = range->minlen >> sbi->log_blocksize;
1018 	cpc.trimmed = 0;
1019 
1020 	/* do checkpoint to issue discard commands safely */
1021 	write_checkpoint(sbi, &cpc);
1022 out:
1023 	range->len = cpc.trimmed << sbi->log_blocksize;
1024 	return 0;
1025 }
1026 
1027 static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
1028 {
1029 	struct curseg_info *curseg = CURSEG_I(sbi, type);
1030 	if (curseg->next_blkoff < sbi->blocks_per_seg)
1031 		return true;
1032 	return false;
1033 }
1034 
1035 static int __get_segment_type_2(struct page *page, enum page_type p_type)
1036 {
1037 	if (p_type == DATA)
1038 		return CURSEG_HOT_DATA;
1039 	else
1040 		return CURSEG_HOT_NODE;
1041 }
1042 
1043 static int __get_segment_type_4(struct page *page, enum page_type p_type)
1044 {
1045 	if (p_type == DATA) {
1046 		struct inode *inode = page->mapping->host;
1047 
1048 		if (S_ISDIR(inode->i_mode))
1049 			return CURSEG_HOT_DATA;
1050 		else
1051 			return CURSEG_COLD_DATA;
1052 	} else {
1053 		if (IS_DNODE(page) && !is_cold_node(page))
1054 			return CURSEG_HOT_NODE;
1055 		else
1056 			return CURSEG_COLD_NODE;
1057 	}
1058 }
1059 
1060 static int __get_segment_type_6(struct page *page, enum page_type p_type)
1061 {
1062 	if (p_type == DATA) {
1063 		struct inode *inode = page->mapping->host;
1064 
1065 		if (S_ISDIR(inode->i_mode))
1066 			return CURSEG_HOT_DATA;
1067 		else if (is_cold_data(page) || file_is_cold(inode))
1068 			return CURSEG_COLD_DATA;
1069 		else
1070 			return CURSEG_WARM_DATA;
1071 	} else {
1072 		if (IS_DNODE(page))
1073 			return is_cold_node(page) ? CURSEG_WARM_NODE :
1074 						CURSEG_HOT_NODE;
1075 		else
1076 			return CURSEG_COLD_NODE;
1077 	}
1078 }
1079 
1080 static int __get_segment_type(struct page *page, enum page_type p_type)
1081 {
1082 	switch (F2FS_P_SB(page)->active_logs) {
1083 	case 2:
1084 		return __get_segment_type_2(page, p_type);
1085 	case 4:
1086 		return __get_segment_type_4(page, p_type);
1087 	}
1088 	/* NR_CURSEG_TYPE(6) logs by default */
1089 	f2fs_bug_on(F2FS_P_SB(page),
1090 		F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE);
1091 	return __get_segment_type_6(page, p_type);
1092 }
1093 
1094 void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
1095 		block_t old_blkaddr, block_t *new_blkaddr,
1096 		struct f2fs_summary *sum, int type)
1097 {
1098 	struct sit_info *sit_i = SIT_I(sbi);
1099 	struct curseg_info *curseg;
1100 
1101 	curseg = CURSEG_I(sbi, type);
1102 
1103 	mutex_lock(&curseg->curseg_mutex);
1104 
1105 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
1106 
1107 	/*
1108 	 * __add_sum_entry must be called with curseg_mutex held
1109 	 * because this function updates a summary entry in the
1110 	 * current summary block.
1111 	 */
1112 	__add_sum_entry(sbi, type, sum);
1113 
1114 	mutex_lock(&sit_i->sentry_lock);
1115 	__refresh_next_blkoff(sbi, curseg);
1116 
1117 	stat_inc_block_count(sbi, curseg);
1118 
1119 	if (!__has_curseg_space(sbi, type))
1120 		sit_i->s_ops->allocate_segment(sbi, type, false);
1121 	/*
1122 	 * SIT information should be updated before segment allocation,
1123 	 * since SSR needs latest valid block information.
1124 	 */
1125 	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
1126 
1127 	mutex_unlock(&sit_i->sentry_lock);
1128 
1129 	if (page && IS_NODESEG(type))
1130 		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
1131 
1132 	mutex_unlock(&curseg->curseg_mutex);
1133 }
1134 
1135 static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
1136 			block_t old_blkaddr, block_t *new_blkaddr,
1137 			struct f2fs_summary *sum, struct f2fs_io_info *fio)
1138 {
1139 	int type = __get_segment_type(page, fio->type);
1140 
1141 	allocate_data_block(sbi, page, old_blkaddr, new_blkaddr, sum, type);
1142 
1143 	/* writeout dirty page into bdev */
1144 	f2fs_submit_page_mbio(sbi, page, *new_blkaddr, fio);
1145 }
1146 
1147 void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
1148 {
1149 	struct f2fs_io_info fio = {
1150 		.type = META,
1151 		.rw = WRITE_SYNC | REQ_META | REQ_PRIO
1152 	};
1153 
1154 	set_page_writeback(page);
1155 	f2fs_submit_page_mbio(sbi, page, page->index, &fio);
1156 }
1157 
1158 void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
1159 		struct f2fs_io_info *fio,
1160 		unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
1161 {
1162 	struct f2fs_summary sum;
1163 	set_summary(&sum, nid, 0, 0);
1164 	do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, fio);
1165 }
1166 
1167 void write_data_page(struct page *page, struct dnode_of_data *dn,
1168 		block_t *new_blkaddr, struct f2fs_io_info *fio)
1169 {
1170 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1171 	struct f2fs_summary sum;
1172 	struct node_info ni;
1173 
1174 	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
1175 	get_node_info(sbi, dn->nid, &ni);
1176 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
1177 
1178 	do_write_page(sbi, page, dn->data_blkaddr, new_blkaddr, &sum, fio);
1179 }
1180 
1181 void rewrite_data_page(struct page *page, block_t old_blkaddr,
1182 					struct f2fs_io_info *fio)
1183 {
1184 	f2fs_submit_page_mbio(F2FS_P_SB(page), page, old_blkaddr, fio);
1185 }
1186 
1187 void recover_data_page(struct f2fs_sb_info *sbi,
1188 			struct page *page, struct f2fs_summary *sum,
1189 			block_t old_blkaddr, block_t new_blkaddr)
1190 {
1191 	struct sit_info *sit_i = SIT_I(sbi);
1192 	struct curseg_info *curseg;
1193 	unsigned int segno, old_cursegno;
1194 	struct seg_entry *se;
1195 	int type;
1196 
1197 	segno = GET_SEGNO(sbi, new_blkaddr);
1198 	se = get_seg_entry(sbi, segno);
1199 	type = se->type;
1200 
1201 	if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
1202 		if (old_blkaddr == NULL_ADDR)
1203 			type = CURSEG_COLD_DATA;
1204 		else
1205 			type = CURSEG_WARM_DATA;
1206 	}
1207 	curseg = CURSEG_I(sbi, type);
1208 
1209 	mutex_lock(&curseg->curseg_mutex);
1210 	mutex_lock(&sit_i->sentry_lock);
1211 
1212 	old_cursegno = curseg->segno;
1213 
1214 	/* change the current segment */
1215 	if (segno != curseg->segno) {
1216 		curseg->next_segno = segno;
1217 		change_curseg(sbi, type, true);
1218 	}
1219 
1220 	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
1221 	__add_sum_entry(sbi, type, sum);
1222 
1223 	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
1224 	locate_dirty_segment(sbi, old_cursegno);
1225 
1226 	mutex_unlock(&sit_i->sentry_lock);
1227 	mutex_unlock(&curseg->curseg_mutex);
1228 }
1229 
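/* check whether @page is still pending in the per-type merged write bio */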
1230 static inline bool is_merged_page(struct f2fs_sb_info *sbi,
1231 					struct page *page, enum page_type type)
1232 {
1233 	enum page_type btype = PAGE_TYPE_OF_BIO(type);
1234 	struct f2fs_bio_info *io = &sbi->write_io[btype];
1235 	struct bio_vec *bvec;
1236 	int i;
1237 
1238 	down_read(&io->io_rwsem);
1239 	if (!io->bio)
1240 		goto out;
1241 
1242 	bio_for_each_segment_all(bvec, io->bio, i) {
1243 		if (page == bvec->bv_page) {
1244 			up_read(&io->io_rwsem);
1245 			return true;
1246 		}
1247 	}
1248 
1249 out:
1250 	up_read(&io->io_rwsem);
1251 	return false;
1252 }
1253 
1254 void f2fs_wait_on_page_writeback(struct page *page,
1255 				enum page_type type)
1256 {
1257 	if (PageWriteback(page)) {
1258 		struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1259 
1260 		if (is_merged_page(sbi, page, type))
1261 			f2fs_submit_merged_bio(sbi, type, WRITE);
1262 		wait_on_page_writeback(page);
1263 	}
1264 }
1265 
1266 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
1267 {
1268 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1269 	struct curseg_info *seg_i;
1270 	unsigned char *kaddr;
1271 	struct page *page;
1272 	block_t start;
1273 	int i, j, offset;
1274 
1275 	start = start_sum_block(sbi);
1276 
1277 	page = get_meta_page(sbi, start++);
1278 	kaddr = (unsigned char *)page_address(page);
1279 
1280 	/* Step 1: restore nat cache */
1281 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1282 	memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);
1283 
1284 	/* Step 2: restore sit cache */
1285 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1286 	memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
1287 						SUM_JOURNAL_SIZE);
1288 	offset = 2 * SUM_JOURNAL_SIZE;
1289 
1290 	/* Step 3: restore summary entries */
1291 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1292 		unsigned short blk_off;
1293 		unsigned int segno;
1294 
1295 		seg_i = CURSEG_I(sbi, i);
1296 		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
1297 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
1298 		seg_i->next_segno = segno;
1299 		reset_curseg(sbi, i, 0);
1300 		seg_i->alloc_type = ckpt->alloc_type[i];
1301 		seg_i->next_blkoff = blk_off;
1302 
1303 		if (seg_i->alloc_type == SSR)
1304 			blk_off = sbi->blocks_per_seg;
1305 
1306 		for (j = 0; j < blk_off; j++) {
1307 			struct f2fs_summary *s;
1308 			s = (struct f2fs_summary *)(kaddr + offset);
1309 			seg_i->sum_blk->entries[j] = *s;
1310 			offset += SUMMARY_SIZE;
1311 			if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1312 						SUM_FOOTER_SIZE)
1313 				continue;
1314 
1315 			f2fs_put_page(page, 1);
1316 			page = NULL;
1317 
1318 			page = get_meta_page(sbi, start++);
1319 			kaddr = (unsigned char *)page_address(page);
1320 			offset = 0;
1321 		}
1322 	}
1323 	f2fs_put_page(page, 1);
1324 	return 0;
1325 }
1326 
1327 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
1328 {
1329 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1330 	struct f2fs_summary_block *sum;
1331 	struct curseg_info *curseg;
1332 	struct page *new;
1333 	unsigned short blk_off;
1334 	unsigned int segno = 0;
1335 	block_t blk_addr = 0;
1336 
1337 	/* get segment number and block addr */
1338 	if (IS_DATASEG(type)) {
1339 		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
1340 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
1341 							CURSEG_HOT_DATA]);
1342 		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
1343 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
1344 		else
1345 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
1346 	} else {
1347 		segno = le32_to_cpu(ckpt->cur_node_segno[type -
1348 							CURSEG_HOT_NODE]);
1349 		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
1350 							CURSEG_HOT_NODE]);
1351 		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
1352 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
1353 							type - CURSEG_HOT_NODE);
1354 		else
1355 			blk_addr = GET_SUM_BLOCK(sbi, segno);
1356 	}
1357 
1358 	new = get_meta_page(sbi, blk_addr);
1359 	sum = (struct f2fs_summary_block *)page_address(new);
1360 
1361 	if (IS_NODESEG(type)) {
1362 		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
1363 			struct f2fs_summary *ns = &sum->entries[0];
1364 			int i;
1365 			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
1366 				ns->version = 0;
1367 				ns->ofs_in_node = 0;
1368 			}
1369 		} else {
1370 			int err;
1371 
1372 			err = restore_node_summary(sbi, segno, sum);
1373 			if (err) {
1374 				f2fs_put_page(new, 1);
1375 				return err;
1376 			}
1377 		}
1378 	}
1379 
1380 	/* set uncompleted segment to curseg */
1381 	curseg = CURSEG_I(sbi, type);
1382 	mutex_lock(&curseg->curseg_mutex);
1383 	memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
1384 	curseg->next_segno = segno;
1385 	reset_curseg(sbi, type, 0);
1386 	curseg->alloc_type = ckpt->alloc_type[type];
1387 	curseg->next_blkoff = blk_off;
1388 	mutex_unlock(&curseg->curseg_mutex);
1389 	f2fs_put_page(new, 1);
1390 	return 0;
1391 }
1392 
1393 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
1394 {
1395 	int type = CURSEG_HOT_DATA;
1396 	int err;
1397 
1398 	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
1399 		/* restore for compacted data summary */
1400 		if (read_compacted_summaries(sbi))
1401 			return -EINVAL;
1402 		type = CURSEG_HOT_NODE;
1403 	}
1404 
1405 	for (; type <= CURSEG_COLD_NODE; type++) {
1406 		err = read_normal_summaries(sbi, type);
1407 		if (err)
1408 			return err;
1409 	}
1410 
1411 	return 0;
1412 }
1413 
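/*
 * Pack the NAT/SIT journals and the data summary entries into consecutive
 * meta pages starting at @blkaddr.
 */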
1414 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
1415 {
1416 	struct page *page;
1417 	unsigned char *kaddr;
1418 	struct f2fs_summary *summary;
1419 	struct curseg_info *seg_i;
1420 	int written_size = 0;
1421 	int i, j;
1422 
1423 	page = grab_meta_page(sbi, blkaddr++);
1424 	kaddr = (unsigned char *)page_address(page);
1425 
1426 	/* Step 1: write nat cache */
1427 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1428 	memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
1429 	written_size += SUM_JOURNAL_SIZE;
1430 
1431 	/* Step 2: write sit cache */
1432 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1433 	memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
1434 						SUM_JOURNAL_SIZE);
1435 	written_size += SUM_JOURNAL_SIZE;
1436 
1437 	/* Step 3: write summary entries */
1438 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1439 		unsigned short blkoff;
1440 		seg_i = CURSEG_I(sbi, i);
1441 		if (sbi->ckpt->alloc_type[i] == SSR)
1442 			blkoff = sbi->blocks_per_seg;
1443 		else
1444 			blkoff = curseg_blkoff(sbi, i);
1445 
1446 		for (j = 0; j < blkoff; j++) {
1447 			if (!page) {
1448 				page = grab_meta_page(sbi, blkaddr++);
1449 				kaddr = (unsigned char *)page_address(page);
1450 				written_size = 0;
1451 			}
1452 			summary = (struct f2fs_summary *)(kaddr + written_size);
1453 			*summary = seg_i->sum_blk->entries[j];
1454 			written_size += SUMMARY_SIZE;
1455 
1456 			if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1457 							SUM_FOOTER_SIZE)
1458 				continue;
1459 
1460 			set_page_dirty(page);
1461 			f2fs_put_page(page, 1);
1462 			page = NULL;
1463 		}
1464 	}
1465 	if (page) {
1466 		set_page_dirty(page);
1467 		f2fs_put_page(page, 1);
1468 	}
1469 }
1470 
1471 static void write_normal_summaries(struct f2fs_sb_info *sbi,
1472 					block_t blkaddr, int type)
1473 {
1474 	int i, end;
1475 	if (IS_DATASEG(type))
1476 		end = type + NR_CURSEG_DATA_TYPE;
1477 	else
1478 		end = type + NR_CURSEG_NODE_TYPE;
1479 
1480 	for (i = type; i < end; i++) {
1481 		struct curseg_info *sum = CURSEG_I(sbi, i);
1482 		mutex_lock(&sum->curseg_mutex);
1483 		write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
1484 		mutex_unlock(&sum->curseg_mutex);
1485 	}
1486 }
1487 
1488 void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1489 {
1490 	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
1491 		write_compacted_summaries(sbi, start_blk);
1492 	else
1493 		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
1494 }
1495 
1496 void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1497 {
1498 	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
1499 		write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
1500 }
1501 
1502 int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
1503 					unsigned int val, int alloc)
1504 {
1505 	int i;
1506 
1507 	if (type == NAT_JOURNAL) {
1508 		for (i = 0; i < nats_in_cursum(sum); i++) {
1509 			if (le32_to_cpu(nid_in_journal(sum, i)) == val)
1510 				return i;
1511 		}
1512 		if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
1513 			return update_nats_in_cursum(sum, 1);
1514 	} else if (type == SIT_JOURNAL) {
1515 		for (i = 0; i < sits_in_cursum(sum); i++)
1516 			if (le32_to_cpu(segno_in_journal(sum, i)) == val)
1517 				return i;
1518 		if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
1519 			return update_sits_in_cursum(sum, 1);
1520 	}
1521 	return -1;
1522 }
1523 
1524 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
1525 					unsigned int segno)
1526 {
1527 	struct sit_info *sit_i = SIT_I(sbi);
1528 	unsigned int offset = SIT_BLOCK_OFFSET(segno);
1529 	block_t blk_addr = sit_i->sit_base_addr + offset;
1530 
1531 	check_seg_range(sbi, segno);
1532 
1533 	/* calculate sit block address */
1534 	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
1535 		blk_addr += sit_i->sit_blocks;
1536 
1537 	return get_meta_page(sbi, blk_addr);
1538 }
1539 
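/*
 * Copy the current SIT block of @start to its paired location and flip
 * the SIT bitmap so that the coming checkpoint uses the new copy.
 */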
1540 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
1541 					unsigned int start)
1542 {
1543 	struct sit_info *sit_i = SIT_I(sbi);
1544 	struct page *src_page, *dst_page;
1545 	pgoff_t src_off, dst_off;
1546 	void *src_addr, *dst_addr;
1547 
1548 	src_off = current_sit_addr(sbi, start);
1549 	dst_off = next_sit_addr(sbi, src_off);
1550 
1551 	/* get current sit block page without lock */
1552 	src_page = get_meta_page(sbi, src_off);
1553 	dst_page = grab_meta_page(sbi, dst_off);
1554 	f2fs_bug_on(sbi, PageDirty(src_page));
1555 
1556 	src_addr = page_address(src_page);
1557 	dst_addr = page_address(dst_page);
1558 	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
1559 
1560 	set_page_dirty(dst_page);
1561 	f2fs_put_page(src_page, 1);
1562 
1563 	set_to_next_sit(sit_i, start);
1564 
1565 	return dst_page;
1566 }
1567 
1568 static struct sit_entry_set *grab_sit_entry_set(void)
1569 {
1570 	struct sit_entry_set *ses =
1571 			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_ATOMIC);
1572 
1573 	ses->entry_cnt = 0;
1574 	INIT_LIST_HEAD(&ses->set_list);
1575 	return ses;
1576 }
1577 
1578 static void release_sit_entry_set(struct sit_entry_set *ses)
1579 {
1580 	list_del(&ses->set_list);
1581 	kmem_cache_free(sit_entry_set_slab, ses);
1582 }
1583 
1584 static void adjust_sit_entry_set(struct sit_entry_set *ses,
1585 						struct list_head *head)
1586 {
1587 	struct sit_entry_set *next = ses;
1588 
1589 	if (list_is_last(&ses->set_list, head))
1590 		return;
1591 
1592 	list_for_each_entry_continue(next, head, set_list)
1593 		if (ses->entry_cnt <= next->entry_cnt)
1594 			break;
1595 
1596 	list_move_tail(&ses->set_list, &next->set_list);
1597 }
1598 
1599 static void add_sit_entry(unsigned int segno, struct list_head *head)
1600 {
1601 	struct sit_entry_set *ses;
1602 	unsigned int start_segno = START_SEGNO(segno);
1603 
1604 	list_for_each_entry(ses, head, set_list) {
1605 		if (ses->start_segno == start_segno) {
1606 			ses->entry_cnt++;
1607 			adjust_sit_entry_set(ses, head);
1608 			return;
1609 		}
1610 	}
1611 
1612 	ses = grab_sit_entry_set();
1613 
1614 	ses->start_segno = start_segno;
1615 	ses->entry_cnt++;
1616 	list_add(&ses->set_list, head);
1617 }
1618 
1619 static void add_sits_in_set(struct f2fs_sb_info *sbi)
1620 {
1621 	struct f2fs_sm_info *sm_info = SM_I(sbi);
1622 	struct list_head *set_list = &sm_info->sit_entry_set;
1623 	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
1624 	unsigned int segno;
1625 
1626 	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
1627 		add_sit_entry(segno, set_list);
1628 }
1629 
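/*
 * Move the SIT journal entries back into the dirty sentry set when the
 * journal can no longer hold all dirty entries.
 */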
1630 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
1631 {
1632 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1633 	struct f2fs_summary_block *sum = curseg->sum_blk;
1634 	int i;
1635 
1636 	for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
1637 		unsigned int segno;
1638 		bool dirtied;
1639 
1640 		segno = le32_to_cpu(segno_in_journal(sum, i));
1641 		dirtied = __mark_sit_entry_dirty(sbi, segno);
1642 
1643 		if (!dirtied)
1644 			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
1645 	}
1646 	update_sits_in_cursum(sum, -sits_in_cursum(sum));
1647 }
1648 
1649 /*
1650  * CP calls this function, which flushes SIT entries including sit_journal,
1651  * and moves prefree segs to free segs.
1652  */
1653 void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1654 {
1655 	struct sit_info *sit_i = SIT_I(sbi);
1656 	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
1657 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1658 	struct f2fs_summary_block *sum = curseg->sum_blk;
1659 	struct sit_entry_set *ses, *tmp;
1660 	struct list_head *head = &SM_I(sbi)->sit_entry_set;
1661 	bool to_journal = true;
1662 	struct seg_entry *se;
1663 
1664 	mutex_lock(&curseg->curseg_mutex);
1665 	mutex_lock(&sit_i->sentry_lock);
1666 
1667 	/*
1668 	 * temporarily add and account the sit entries marked in the dirty
1669 	 * bitmap in sit entry sets
1670 	 */
1671 	add_sits_in_set(sbi);
1672 
1673 	/*
1674 	 * if there is not enough space in the journal to store the dirty sit
1675 	 * entries, remove all entries from the journal and add and account
1676 	 * them in the sit entry set.
1677 	 */
1678 	if (!__has_cursum_space(sum, sit_i->dirty_sentries, SIT_JOURNAL))
1679 		remove_sits_in_journal(sbi);
1680 
1681 	if (!sit_i->dirty_sentries)
1682 		goto out;
1683 
1684 	/*
1685 	 * there are two steps to flush sit entries:
1686 	 * #1, flush sit entries to journal in current cold data summary block.
1687 	 * #2, flush sit entries to sit page.
1688 	 */
1689 	list_for_each_entry_safe(ses, tmp, head, set_list) {
1690 		struct page *page;
1691 		struct f2fs_sit_block *raw_sit = NULL;
1692 		unsigned int start_segno = ses->start_segno;
1693 		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
1694 						(unsigned long)MAIN_SEGS(sbi));
1695 		unsigned int segno = start_segno;
1696 
1697 		if (to_journal &&
1698 			!__has_cursum_space(sum, ses->entry_cnt, SIT_JOURNAL))
1699 			to_journal = false;
1700 
1701 		if (!to_journal) {
1702 			page = get_next_sit_page(sbi, start_segno);
1703 			raw_sit = page_address(page);
1704 		}
1705 
1706 		/* flush dirty sit entries in region of current sit set */
1707 		for_each_set_bit_from(segno, bitmap, end) {
1708 			int offset, sit_offset;
1709 
1710 			se = get_seg_entry(sbi, segno);
1711 
1712 			/* add discard candidates */
1713 			if (SM_I(sbi)->nr_discards < SM_I(sbi)->max_discards) {
1714 				cpc->trim_start = segno;
1715 				add_discard_addrs(sbi, cpc);
1716 			}
1717 
1718 			if (to_journal) {
1719 				offset = lookup_journal_in_cursum(sum,
1720 							SIT_JOURNAL, segno, 1);
1721 				f2fs_bug_on(sbi, offset < 0);
1722 				segno_in_journal(sum, offset) =
1723 							cpu_to_le32(segno);
1724 				seg_info_to_raw_sit(se,
1725 						&sit_in_journal(sum, offset));
1726 			} else {
1727 				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
1728 				seg_info_to_raw_sit(se,
1729 						&raw_sit->entries[sit_offset]);
1730 			}
1731 
1732 			__clear_bit(segno, bitmap);
1733 			sit_i->dirty_sentries--;
1734 			ses->entry_cnt--;
1735 		}
1736 
1737 		if (!to_journal)
1738 			f2fs_put_page(page, 1);
1739 
1740 		f2fs_bug_on(sbi, ses->entry_cnt);
1741 		release_sit_entry_set(ses);
1742 	}
1743 
1744 	f2fs_bug_on(sbi, !list_empty(head));
1745 	f2fs_bug_on(sbi, sit_i->dirty_sentries);
1746 out:
1747 	if (cpc->reason == CP_DISCARD) {
1748 		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
1749 			add_discard_addrs(sbi, cpc);
1750 	}
1751 	mutex_unlock(&sit_i->sentry_lock);
1752 	mutex_unlock(&curseg->curseg_mutex);
1753 
1754 	set_prefree_as_free_segments(sbi);
1755 }
1756 
1757 static int build_sit_info(struct f2fs_sb_info *sbi)
1758 {
1759 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1760 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1761 	struct sit_info *sit_i;
1762 	unsigned int sit_segs, start;
1763 	char *src_bitmap, *dst_bitmap;
1764 	unsigned int bitmap_size;
1765 
1766 	/* allocate memory for SIT information */
1767 	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
1768 	if (!sit_i)
1769 		return -ENOMEM;
1770 
1771 	SM_I(sbi)->sit_info = sit_i;
1772 
1773 	sit_i->sentries = vzalloc(MAIN_SEGS(sbi) * sizeof(struct seg_entry));
1774 	if (!sit_i->sentries)
1775 		return -ENOMEM;
1776 
1777 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
1778 	sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
1779 	if (!sit_i->dirty_sentries_bitmap)
1780 		return -ENOMEM;
1781 
1782 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
1783 		sit_i->sentries[start].cur_valid_map
1784 			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1785 		sit_i->sentries[start].ckpt_valid_map
1786 			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1787 		if (!sit_i->sentries[start].cur_valid_map
1788 				|| !sit_i->sentries[start].ckpt_valid_map)
1789 			return -ENOMEM;
1790 	}
1791 
1792 	if (sbi->segs_per_sec > 1) {
1793 		sit_i->sec_entries = vzalloc(MAIN_SECS(sbi) *
1794 					sizeof(struct sec_entry));
1795 		if (!sit_i->sec_entries)
1796 			return -ENOMEM;
1797 	}
1798 
1799 	/* get information related with SIT */
1800 	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
1801 
1802 	/* set up SIT bitmap from checkpoint pack */
1803 	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
1804 	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
1805 
1806 	dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
1807 	if (!dst_bitmap)
1808 		return -ENOMEM;
1809 
1810 	/* init SIT information */
1811 	sit_i->s_ops = &default_salloc_ops;
1812 
1813 	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
1814 	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
1815 	sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
1816 	sit_i->sit_bitmap = dst_bitmap;
1817 	sit_i->bitmap_size = bitmap_size;
1818 	sit_i->dirty_sentries = 0;
1819 	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
1820 	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
1821 	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
1822 	mutex_init(&sit_i->sentry_lock);
1823 	return 0;
1824 }
1825 
1826 static int build_free_segmap(struct f2fs_sb_info *sbi)
1827 {
1828 	struct free_segmap_info *free_i;
1829 	unsigned int bitmap_size, sec_bitmap_size;
1830 
1831 	/* allocate memory for free segmap information */
1832 	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
1833 	if (!free_i)
1834 		return -ENOMEM;
1835 
1836 	SM_I(sbi)->free_info = free_i;
1837 
1838 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
1839 	free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
1840 	if (!free_i->free_segmap)
1841 		return -ENOMEM;
1842 
1843 	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
1844 	free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
1845 	if (!free_i->free_secmap)
1846 		return -ENOMEM;
1847 
1848 	/* temporarily mark all segments/sections as in use; cleared in init_free_segmap() */
1849 	memset(free_i->free_segmap, 0xff, bitmap_size);
1850 	memset(free_i->free_secmap, 0xff, sec_bitmap_size);
1851 
1852 	/* init free segmap information */
1853 	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
1854 	free_i->free_segments = 0;
1855 	free_i->free_sections = 0;
1856 	rwlock_init(&free_i->segmap_lock);
1857 	return 0;
1858 }
1859 
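/*
 * Allocate one curseg_info per log type and restore their summary
 * blocks from the checkpoint.
 */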
1860 static int build_curseg(struct f2fs_sb_info *sbi)
1861 {
1862 	struct curseg_info *array;
1863 	int i;
1864 
1865 	array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
1866 	if (!array)
1867 		return -ENOMEM;
1868 
1869 	SM_I(sbi)->curseg_array = array;
1870 
1871 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
1872 		mutex_init(&array[i].curseg_mutex);
1873 		array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
1874 		if (!array[i].sum_blk)
1875 			return -ENOMEM;
1876 		array[i].segno = NULL_SEGNO;
1877 		array[i].next_blkoff = 0;
1878 	}
1879 	return restore_curseg_summaries(sbi);
1880 }
1881 
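/*
 * Load SIT entries from disk (with readahead), preferring entries still
 * cached in the cold-data curseg journal, and accumulate per-section
 * valid block counts when sections span multiple segments.
 */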
1882 static void build_sit_entries(struct f2fs_sb_info *sbi)
1883 {
1884 	struct sit_info *sit_i = SIT_I(sbi);
1885 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1886 	struct f2fs_summary_block *sum = curseg->sum_blk;
1887 	int sit_blk_cnt = SIT_BLK_CNT(sbi);
1888 	unsigned int i, start, end;
1889 	unsigned int nr_read, start_blk = 0;
1890 	int nrpages = MAX_BIO_BLOCKS(sbi);
1891 
1892 	do {
1893 		nr_read = ra_meta_pages(sbi, start_blk, nrpages, META_SIT);
1894 
1895 		start = start_blk * sit_i->sents_per_block;
1896 		end = (start_blk + nr_read) * sit_i->sents_per_block;
1897 
1898 		for (; start < end && start < MAIN_SEGS(sbi); start++) {
1899 			struct seg_entry *se = &sit_i->sentries[start];
1900 			struct f2fs_sit_block *sit_blk;
1901 			struct f2fs_sit_entry sit;
1902 			struct page *page;
1903 
1904 			mutex_lock(&curseg->curseg_mutex);
1905 			for (i = 0; i < sits_in_cursum(sum); i++) {
1906 				if (le32_to_cpu(segno_in_journal(sum, i))
1907 								== start) {
1908 					sit = sit_in_journal(sum, i);
1909 					mutex_unlock(&curseg->curseg_mutex);
1910 					goto got_it;
1911 				}
1912 			}
1913 			mutex_unlock(&curseg->curseg_mutex);
1914 
1915 			page = get_current_sit_page(sbi, start);
1916 			sit_blk = (struct f2fs_sit_block *)page_address(page);
1917 			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
1918 			f2fs_put_page(page, 1);
1919 got_it:
1920 			check_block_count(sbi, start, &sit);
1921 			seg_info_from_raw_sit(se, &sit);
1922 			if (sbi->segs_per_sec > 1) {
1923 				struct sec_entry *e = get_sec_entry(sbi, start);
1924 				e->valid_blocks += se->valid_blocks;
1925 			}
1926 		}
1927 		start_blk += nr_read;
1928 	} while (start_blk < sit_blk_cnt);
1929 }
1930 
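/*
 * Mark segments with no valid blocks as free and flag the current
 * segments as in use.
 */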
1931 static void init_free_segmap(struct f2fs_sb_info *sbi)
1932 {
1933 	unsigned int start;
1934 	int type;
1935 
1936 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
1937 		struct seg_entry *sentry = get_seg_entry(sbi, start);
1938 		if (!sentry->valid_blocks)
1939 			__set_free(sbi, start);
1940 	}
1941 
1942 	/* mark the current segments as in use */
1943 	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
1944 		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
1945 		__set_test_and_inuse(sbi, curseg_t->segno);
1946 	}
1947 }
1948 
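/*
 * Walk the in-use segments and add partially valid ones to the dirty
 * segment list.
 */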
1949 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
1950 {
1951 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1952 	struct free_segmap_info *free_i = FREE_I(sbi);
1953 	unsigned int segno = 0, offset = 0;
1954 	unsigned short valid_blocks;
1955 
1956 	while (1) {
1957 		/* find dirty segment based on free segmap */
1958 		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
1959 		if (segno >= MAIN_SEGS(sbi))
1960 			break;
1961 		offset = segno + 1;
1962 		valid_blocks = get_valid_blocks(sbi, segno, 0);
1963 		if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
1964 			continue;
1965 		if (valid_blocks > sbi->blocks_per_seg) {
1966 			f2fs_bug_on(sbi, 1);
1967 			continue;
1968 		}
1969 		mutex_lock(&dirty_i->seglist_lock);
1970 		__locate_dirty_segment(sbi, segno, DIRTY);
1971 		mutex_unlock(&dirty_i->seglist_lock);
1972 	}
1973 }
1974 
1975 static int init_victim_secmap(struct f2fs_sb_info *sbi)
1976 {
1977 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1978 	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
1979 
1980 	dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
1981 	if (!dirty_i->victim_secmap)
1982 		return -ENOMEM;
1983 	return 0;
1984 }
1985 
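/*
 * Allocate the per-type dirty segment bitmaps, populate the DIRTY map,
 * and allocate the victim section map used by garbage collection.
 */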
1986 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
1987 {
1988 	struct dirty_seglist_info *dirty_i;
1989 	unsigned int bitmap_size, i;
1990 
1991 	/* allocate memory for dirty segments list information */
1992 	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
1993 	if (!dirty_i)
1994 		return -ENOMEM;
1995 
1996 	SM_I(sbi)->dirty_info = dirty_i;
1997 	mutex_init(&dirty_i->seglist_lock);
1998 
1999 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
2000 
2001 	for (i = 0; i < NR_DIRTY_TYPE; i++) {
2002 		dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
2003 		if (!dirty_i->dirty_segmap[i])
2004 			return -ENOMEM;
2005 	}
2006 
2007 	init_dirty_segmap(sbi);
2008 	return init_victim_secmap(sbi);
2009 }
2010 
2011 /*
2012  * Update min, max modified time for cost-benefit GC algorithm
2013  */
2014 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
2015 {
2016 	struct sit_info *sit_i = SIT_I(sbi);
2017 	unsigned int segno;
2018 
2019 	mutex_lock(&sit_i->sentry_lock);
2020 
2021 	sit_i->min_mtime = LLONG_MAX;
2022 
2023 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
2024 		unsigned int i;
2025 		unsigned long long mtime = 0;
2026 
2027 		for (i = 0; i < sbi->segs_per_sec; i++)
2028 			mtime += get_seg_entry(sbi, segno + i)->mtime;
2029 
2030 		mtime = div_u64(mtime, sbi->segs_per_sec);
2031 
2032 		if (sit_i->min_mtime > mtime)
2033 			sit_i->min_mtime = mtime;
2034 	}
2035 	sit_i->max_mtime = get_mtime(sbi);
2036 	mutex_unlock(&sit_i->sentry_lock);
2037 }
2038 
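/*
 * Called at mount time: set up f2fs_sm_info from the superblock and
 * checkpoint, then build the SIT, current segments, and free/dirty
 * segment maps.
 */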
2039 int build_segment_manager(struct f2fs_sb_info *sbi)
2040 {
2041 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2042 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2043 	struct f2fs_sm_info *sm_info;
2044 	int err;
2045 
2046 	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
2047 	if (!sm_info)
2048 		return -ENOMEM;
2049 
2050 	/* init sm info */
2051 	sbi->sm_info = sm_info;
2052 	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
2053 	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
2054 	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
2055 	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
2056 	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
2057 	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
2058 	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
2059 	sm_info->rec_prefree_segments = sm_info->main_segments *
2060 					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
2061 	sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
2062 	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
2063 	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
2064 
2065 	INIT_LIST_HEAD(&sm_info->discard_list);
2066 	sm_info->nr_discards = 0;
2067 	sm_info->max_discards = 0;
2068 
2069 	INIT_LIST_HEAD(&sm_info->sit_entry_set);
2070 
2071 	if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
2072 		err = create_flush_cmd_control(sbi);
2073 		if (err)
2074 			return err;
2075 	}
2076 
2077 	err = build_sit_info(sbi);
2078 	if (err)
2079 		return err;
2080 	err = build_free_segmap(sbi);
2081 	if (err)
2082 		return err;
2083 	err = build_curseg(sbi);
2084 	if (err)
2085 		return err;
2086 
2087 	/* reinit free segmap based on SIT */
2088 	build_sit_entries(sbi);
2089 
2090 	init_free_segmap(sbi);
2091 	err = build_dirty_segmap(sbi);
2092 	if (err)
2093 		return err;
2094 
2095 	init_min_max_mtime(sbi);
2096 	return 0;
2097 }
2098 
2099 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
2100 		enum dirty_type dirty_type)
2101 {
2102 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2103 
2104 	mutex_lock(&dirty_i->seglist_lock);
2105 	kfree(dirty_i->dirty_segmap[dirty_type]);
2106 	dirty_i->nr_dirty[dirty_type] = 0;
2107 	mutex_unlock(&dirty_i->seglist_lock);
2108 }
2109 
2110 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
2111 {
2112 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2113 	kfree(dirty_i->victim_secmap);
2114 }
2115 
2116 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
2117 {
2118 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2119 	int i;
2120 
2121 	if (!dirty_i)
2122 		return;
2123 
2124 	/* discard pre-free/dirty segments list */
2125 	for (i = 0; i < NR_DIRTY_TYPE; i++)
2126 		discard_dirty_segmap(sbi, i);
2127 
2128 	destroy_victim_secmap(sbi);
2129 	SM_I(sbi)->dirty_info = NULL;
2130 	kfree(dirty_i);
2131 }
2132 
2133 static void destroy_curseg(struct f2fs_sb_info *sbi)
2134 {
2135 	struct curseg_info *array = SM_I(sbi)->curseg_array;
2136 	int i;
2137 
2138 	if (!array)
2139 		return;
2140 	SM_I(sbi)->curseg_array = NULL;
2141 	for (i = 0; i < NR_CURSEG_TYPE; i++)
2142 		kfree(array[i].sum_blk);
2143 	kfree(array);
2144 }
2145 
2146 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
2147 {
2148 	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
2149 	if (!free_i)
2150 		return;
2151 	SM_I(sbi)->free_info = NULL;
2152 	kfree(free_i->free_segmap);
2153 	kfree(free_i->free_secmap);
2154 	kfree(free_i);
2155 }
2156 
2157 static void destroy_sit_info(struct f2fs_sb_info *sbi)
2158 {
2159 	struct sit_info *sit_i = SIT_I(sbi);
2160 	unsigned int start;
2161 
2162 	if (!sit_i)
2163 		return;
2164 
2165 	if (sit_i->sentries) {
2166 		for (start = 0; start < MAIN_SEGS(sbi); start++) {
2167 			kfree(sit_i->sentries[start].cur_valid_map);
2168 			kfree(sit_i->sentries[start].ckpt_valid_map);
2169 		}
2170 	}
2171 	vfree(sit_i->sentries);
2172 	vfree(sit_i->sec_entries);
2173 	kfree(sit_i->dirty_sentries_bitmap);
2174 
2175 	SM_I(sbi)->sit_info = NULL;
2176 	kfree(sit_i->sit_bitmap);
2177 	kfree(sit_i);
2178 }
2179 
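/*
 * Tear down everything built by build_segment_manager().
 */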
2180 void destroy_segment_manager(struct f2fs_sb_info *sbi)
2181 {
2182 	struct f2fs_sm_info *sm_info = SM_I(sbi);
2183 
2184 	if (!sm_info)
2185 		return;
2186 	destroy_flush_cmd_control(sbi);
2187 	destroy_dirty_segmap(sbi);
2188 	destroy_curseg(sbi);
2189 	destroy_free_segmap(sbi);
2190 	destroy_sit_info(sbi);
2191 	sbi->sm_info = NULL;
2192 	kfree(sm_info);
2193 }
2194 
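/*
 * Create the slab caches used by the segment manager; on failure, undo
 * any caches already created.
 */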
2195 int __init create_segment_manager_caches(void)
2196 {
2197 	discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
2198 			sizeof(struct discard_entry));
2199 	if (!discard_entry_slab)
2200 		goto fail;
2201 
2202 	sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
2203 			sizeof(struct sit_entry_set));
2204 	if (!sit_entry_set_slab)
2205 		goto destroy_discard_entry;
2206 
2207 	inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
2208 			sizeof(struct inmem_pages));
2209 	if (!inmem_entry_slab)
2210 		goto destroy_sit_entry_set;
2211 	return 0;
2212 
2213 destroy_sit_entry_set:
2214 	kmem_cache_destroy(sit_entry_set_slab);
2215 destroy_discard_entry:
2216 	kmem_cache_destroy(discard_entry_slab);
2217 fail:
2218 	return -ENOMEM;
2219 }
2220 
2221 void destroy_segment_manager_caches(void)
2222 {
2223 	kmem_cache_destroy(sit_entry_set_slab);
2224 	kmem_cache_destroy(discard_entry_slab);
2225 	kmem_cache_destroy(inmem_entry_slab);
2226 }
2227