/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff) == 0) {
		num += 32;
		word >>= 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		num += 16;
		word >>= 16;
	}
	if ((word & 0xff) == 0) {
		num += 8;
		word >>= 8;
	}
	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;
	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;
	if ((word & 0x2) == 0)
		num += 1;
	return num;
}
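
/*
 * Worked example (illustrative only, not code used by the driver):
 * __reverse_ffs(0x08) returns 4. Tracing the cascade above, the 32-,
 * 16- and 8-bit tests all see a nonzero word, the high-nibble test
 * (word & 0xf0) == 0 adds 4, the (word & 0xc) test shifts the word
 * right by two, and the final test adds nothing, so num == 4. This
 * matches f2fs_set_bit(4, ...), which stores 0x08 in the first byte
 * because f2fs bit n maps to 1 << (7 - n) within each byte.
 */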

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * Example:
 *                             LSB <--> MSB
 *   f2fs_set_bit(0, bitmap) => 0000 0001
 *   f2fs_set_bit(7, bitmap) => 1000 0000
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;
	unsigned long mask, submask;
	unsigned long quot, rest;

	if (offset >= size)
		return size;

	size -= result;
	offset %= BITS_PER_LONG;
	if (!offset)
		goto aligned;

	tmp = *(p++);
	quot = (offset >> 3) << 3;
	rest = offset & 0x7;
	mask = ~0UL << quot;
	submask = (unsigned char)(0xff << rest) >> rest;
	submask <<= quot;
	mask &= submask;
	tmp &= mask;
	if (size < BITS_PER_LONG)
		goto found_first;
	if (tmp)
		goto found_middle;

	size -= BITS_PER_LONG;
	result += BITS_PER_LONG;
aligned:
	while (size & ~(BITS_PER_LONG-1)) {
		tmp = *(p++);
		if (tmp)
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp &= (~0UL >> (BITS_PER_LONG - size));
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;   /* Nope. */
found_middle:
	return result + __reverse_ffs(tmp);
}

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;
	unsigned long mask, submask;
	unsigned long quot, rest;

	if (offset >= size)
		return size;

	size -= result;
	offset %= BITS_PER_LONG;
	if (!offset)
		goto aligned;

	tmp = *(p++);
	quot = (offset >> 3) << 3;
	rest = offset & 0x7;
	mask = ~(~0UL << quot);
	submask = (unsigned char)~((unsigned char)(0xff << rest) >> rest);
	submask <<= quot;
	mask += submask;
	tmp |= mask;
	if (size < BITS_PER_LONG)
		goto found_first;
	if (~tmp)
		goto found_middle;

	size -= BITS_PER_LONG;
	result += BITS_PER_LONG;
aligned:
	while (size & ~(BITS_PER_LONG - 1)) {
		tmp = *(p++);
		if (~tmp)
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)        /* Are any bits zero? */
		return result + size;   /* Nope. */
found_middle:
	return result + __reverse_ffz(tmp);
}
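
/*
 * Usage sketch (a standalone illustration, not code from this file):
 * scanning an 8-bit map in which only f2fs bits 2 and 3 are set (raw
 * byte 0x30, since f2fs bit n maps to 1 << (7 - n) within a byte):
 *
 *	unsigned long map = 0x30;
 *	unsigned long s = __find_rev_next_bit(&map, 8, 0);	// s == 2
 *	unsigned long e = __find_rev_next_zero_bit(&map, 8, s);	// e == 4
 *
 * The half-open range [s, e) describes one contiguous run of set bits,
 * which is exactly how add_discard_addrs() below walks dmap.
 */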

void register_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *new;
	int err;

	SetPagePrivate(page);
	f2fs_trace_pid(page);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);
retry:
	/* increase reference count with clean state */
	mutex_lock(&fi->inmem_lock);
	err = radix_tree_insert(&fi->inmem_root, page->index, new);
	if (err == -EEXIST) {
		mutex_unlock(&fi->inmem_lock);
		kmem_cache_free(inmem_entry_slab, new);
		return;
	} else if (err) {
		mutex_unlock(&fi->inmem_lock);
		goto retry;
	}
	get_page(page);
	list_add_tail(&new->list, &fi->inmem_pages);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&fi->inmem_lock);
}

void commit_inmem_pages(struct inode *inode, bool abort)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	bool submit_bio = false;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};

	/*
	 * abort is true only when f2fs_evict_inode is called. Since
	 * f2fs_evict_inode does not produce any data writes, there is no
	 * need to call f2fs_balance_fs in that case. Otherwise, f2fs_gc in
	 * f2fs_balance_fs could wait forever until this inode is freed by
	 * iget_locked in f2fs_iget.
	 */
	if (!abort) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);
	}

	mutex_lock(&fi->inmem_lock);
	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		if (!abort) {
			lock_page(cur->page);
			if (cur->page->mapping == inode->i_mapping) {
				f2fs_wait_on_page_writeback(cur->page, DATA);
				if (clear_page_dirty_for_io(cur->page))
					inode_dec_dirty_pages(inode);
				do_write_data_page(cur->page, &fio);
				submit_bio = true;
			}
			f2fs_put_page(cur->page, 1);
		} else {
			put_page(cur->page);
		}
		radix_tree_delete(&fi->inmem_root, cur->page->index);
		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	mutex_unlock(&fi->inmem_lock);

	if (!abort) {
		f2fs_unlock_op(sbi);
		if (submit_bio)
			f2fs_submit_merged_bio(sbi, DATA, WRITE);
	}
}
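
/*
 * Usage sketch (hedged): the atomic-write path pairs these helpers.
 * Dirty pages of an atomic-write file are parked with
 * register_inmem_page() at write time and only reach the block layer
 * when the whole set is committed:
 *
 *	register_inmem_page(inode, page);	// on each buffered write
 *	...
 *	commit_inmem_pages(inode, false);	// commit: write all pages
 *	// or, from eviction:
 *	commit_inmem_pages(inode, true);	// abort: just drop them
 *
 * Either call empties fi->inmem_pages and the radix tree; the two
 * outcomes differ only in whether do_write_data_page() is invoked.
 */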

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi)
{
	/*
	 * We should do GC, or end up with a checkpoint, if there are too many
	 * dirty dir/node pages and not enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi);
	}
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* check the # of cached NAT entries and prefree segments */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
			excess_prefree_segs(sbi) ||
			!available_free_memory(sbi, INO_ENTRIES))
		f2fs_sync_fs(sbi->sb, true);
}

static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&fcc->issue_list)) {
		struct bio *bio = bio_alloc(GFP_NOIO, 0);
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		bio->bi_bdev = sbi->sb->s_bdev;
		ret = submit_bio_wait(WRITE_FLUSH, bio);

		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		bio_put(bio);
		fcc->dispatch_list = NULL;
	}

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}

int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
	struct flush_cmd cmd;

	trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER),
					test_opt(sbi, FLUSH_MERGE));

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE))
		return blkdev_issue_flush(sbi->sb->s_bdev, GFP_KERNEL, NULL);

	init_completion(&cmd.wait);

	llist_add(&cmd.llnode, &fcc->issue_list);

	if (!fcc->dispatch_list)
		wake_up(&fcc->flush_wait_queue);

	wait_for_completion(&cmd.wait);

	return cmd.ret;
}
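
/*
 * Example (hedged): the merge behaviour matters under fsync storms.
 * If four threads call f2fs_issue_flush() while a flush is already
 * being dispatched, all four commands land on fcc->issue_list and are
 * completed by a single WRITE_FLUSH bio from issue_flush_thread():
 *
 *	// threads A..D, concurrently:
 *	err = f2fs_issue_flush(sbi);	// each blocks on its own cmd.wait
 *	// issue_flush_thread(): one submit_bio_wait(WRITE_FLUSH, bio),
 *	// then complete(&cmd->wait) for every queued command.
 *
 * Without the FLUSH_MERGE mount option each caller would instead issue
 * its own blkdev_issue_flush(), i.e. four device cache flushes.
 */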

int create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->cmd_control_info = fcc;
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kfree(fcc);
		SM_I(sbi)->cmd_control_info = NULL;
		return err;
	}

	return err;
}

void destroy_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;

	if (fcc && fcc->f2fs_issue_flush)
		kthread_stop(fcc->f2fs_issue_flush);
	kfree(fcc);
	SM_I(sbi)->cmd_control_info = NULL;
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
			clear_bit(GET_SECNO(sbi, segno),
						dirty_i->victim_secmap);
	}
}

/*
 * No error such as -ENOMEM should occur here; adding a dirty entry to the
 * seglist is not a critical operation.
 * If the given segment is one of the current working segments, it is not
 * added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, 0);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}
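
/*
 * Example (illustrative; assumes the common geometry of 512 blocks per
 * segment): the three cases above classify a segment as follows:
 *
 *	get_valid_blocks() == 0   -> PRE    (freeable after checkpoint)
 *	get_valid_blocks() == 100 -> DIRTY  (partially valid, GC victim)
 *	get_valid_blocks() == 512 -> neither (fully valid; also removed
 *	                             from DIRTY for the SSR recovery path)
 */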

static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = SECTOR_FROM_BLOCK(blkstart);
	sector_t len = SECTOR_FROM_BLOCK(blklen);
	trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
	return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
}

void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	if (f2fs_issue_discard(sbi, blkaddr, 1)) {
		struct page *page = grab_meta_page(sbi, blkaddr);
		/* zero-filled page */
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

static void __add_discard_entry(struct f2fs_sb_info *sbi,
		struct cp_control *cpc, unsigned int start, unsigned int end)
{
	struct list_head *head = &SM_I(sbi)->discard_list;
	struct discard_entry *new, *last;

	if (!list_empty(head)) {
		last = list_last_entry(head, struct discard_entry, list);
		if (START_BLOCK(sbi, cpc->trim_start) + start ==
						last->blkaddr + last->len) {
			last->len += end - start;
			goto done;
		}
	}

	new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
	INIT_LIST_HEAD(&new->list);
	new->blkaddr = START_BLOCK(sbi, cpc->trim_start) + start;
	new->len = end - start;
	list_add_tail(&new->list, head);
done:
	SM_I(sbi)->nr_discards += end - start;
	cpc->trimmed += end - start;
}

static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	int max_blocks = sbi->blocks_per_seg;
	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *dmap = SIT_I(sbi)->tmp_map;
	unsigned int start = 0, end = -1;
	bool force = (cpc->reason == CP_DISCARD);
	int i;

	if (!force && (!test_opt(sbi, DISCARD) ||
			SM_I(sbi)->nr_discards >= SM_I(sbi)->max_discards))
		return;

	if (force && !se->valid_blocks) {
		struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
		/*
		 * if this segment is registered in the prefree list, then
		 * we should skip adding a discard candidate, and let the
		 * checkpoint do that later.
		 */
		mutex_lock(&dirty_i->seglist_lock);
		if (test_bit(cpc->trim_start, dirty_i->dirty_segmap[PRE])) {
			mutex_unlock(&dirty_i->seglist_lock);
			cpc->trimmed += sbi->blocks_per_seg;
			return;
		}
		mutex_unlock(&dirty_i->seglist_lock);

		__add_discard_entry(sbi, cpc, 0, sbi->blocks_per_seg);
		return;
	}

	/* segments with zero valid blocks are discarded through the prefree list */
	if (!se->valid_blocks || se->valid_blocks == max_blocks)
		return;

	/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
	for (i = 0; i < entries; i++)
		dmap[i] = force ? ~ckpt_map[i] :
				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

	while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
		if (start >= max_blocks)
			break;

		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);

		if (end - start < cpc->trim_minlen)
			continue;

		__add_discard_entry(sbi, cpc, start, end);
	}
}
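
/*
 * Worked example (one byte of the maps, for illustration): the
 * candidate bitmap dmap marks blocks that were valid at the last
 * checkpoint but are invalid now, i.e. blocks that can be trimmed
 * safely:
 *
 *	ckpt_map: 1111 0000   (valid at the last checkpoint)
 *	cur_map:  1100 1100   (valid now)
 *	dmap = (cur ^ ckpt) & ckpt = 0011 0000
 *
 * With force (CP_DISCARD), dmap = ~ckpt_map instead, so every block
 * not pinned by the checkpoint becomes a trim candidate.
 */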

void release_discard_addrs(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->discard_list);
	struct discard_entry *entry, *this;

	/* drop caches */
	list_for_each_entry_safe(entry, this, head, list) {
		list_del(&entry->list);
		kmem_cache_free(discard_entry_slab, entry);
	}
}

/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
		__set_test_and_free(sbi, segno);
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->discard_list);
	struct discard_entry *entry, *this;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int start = 0, end = -1;

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;
		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
		if (start >= MAIN_SEGS(sbi))
			break;
		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
								start + 1);

		for (i = start; i < end; i++)
			clear_bit(i, prefree_map);

		dirty_i->nr_dirty[PRE] -= end - start;

		if (!test_opt(sbi, DISCARD))
			continue;

		f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
				(end - start) << sbi->log_blocks_per_seg);
	}
	mutex_unlock(&dirty_i->seglist_lock);

	/* send small discards */
	list_for_each_entry_safe(entry, this, head, list) {
		f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
		list_del(&entry->list);
		SM_I(sbi)->nr_discards -= entry->len;
		kmem_cache_free(discard_entry_slab, entry);
	}
}

static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);

	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
		sit_i->dirty_sentries++;
		return false;
	}

	return true;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);
	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		if (f2fs_test_and_set_bit(offset, se->cur_valid_map))
			f2fs_bug_on(sbi, 1);
	} else {
		if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map))
			f2fs_bug_on(sbi, 1);
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}

void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
{
	update_sit_entry(sbi, new, 1);
	if (GET_SEGNO(sbi, old) != NULL_SEGNO)
		update_sit_entry(sbi, old, -1);

	locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
	locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
}

void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	f2fs_bug_on(sbi, addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	mutex_lock(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	mutex_unlock(&sit_i->sentry_lock);
}

/*
 * This function must be called with curseg_mutex held.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;
	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else {
			if (for_ra)
				valid_sum_count += le16_to_cpu(
					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
			else
				valid_sum_count += curseg_blkoff(sbi, i);
		}
	}

	sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
	if (valid_sum_count <= sum_in_page)
		return 1;
	else if ((valid_sum_count - sum_in_page) <=
		(PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
		return 2;
	return 3;
}
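
/*
 * Worked example (assuming the usual on-disk constants: 4KB pages,
 * SUMMARY_SIZE = 7, SUM_FOOTER_SIZE = 5 and SUM_JOURNAL_SIZE = 507):
 * the first compacted page holds (4096 - 2*507 - 5) / 7 = 439 summary
 * entries and each following page holds (4096 - 5) / 7 = 584. So up to
 * 439 valid summaries fit in one page and up to 1023 in two; three
 * pages always suffice because three data logs of 512 blocks each
 * yield at most 1536 entries, below the three-page capacity of 1607.
 */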

/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *kaddr = page_address(page);
	memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}

/*
 * Find a new segment in the free segment bitmap, scanning in the right
 * order. This function must succeed; failing to find a segment is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = *newseg / sbi->segs_per_sec;
	unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	spin_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
					MAIN_SEGS(sbi), *newseg + 1);
		if (segno - *newseg < sbi->segs_per_sec -
					(*newseg % sbi->segs_per_sec))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
	if (secno >= MAIN_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	hint = secno;
	segno = secno * sbi->segs_per_sec;
	zoneno = secno / sbi->secs_per_zone;

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	spin_unlock(&free_i->segmap_lock);
}

static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in the LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}

static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	unsigned long *target_map = SIT_I(sbi)->tmp_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	int i, pos;

	for (i = 0; i < entries; i++)
		target_map[i] = ckpt_map[i] | cur_map[i];

	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);

	seg->next_blkoff = pos;
}

/*
 * If a segment is written in the LFS manner, the next block offset is simply
 * obtained by incrementing the current block offset. However, if a segment is
 * written in the SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff.
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}
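
/*
 * Example (illustrative): suppose next_blkoff == 5. In LFS mode the
 * next allocation simply lands on offset 6. In SSR mode the segment
 * already contains valid or checkpointed blocks, so __next_free_blkoff
 * scans target_map = ckpt_map | cur_map from offset 6 and may skip
 * ahead, e.g. to offset 9 if blocks 6-8 are still in use.
 */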

/*
 * This function always allocates a used segment (from the dirty seglist) in
 * the SSR manner, so it must recover the existing segment information for
 * the valid blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	if (reuse) {
		sum_page = get_sum_page(sbi, new_segno);
		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
		f2fs_put_page(sum_page, 1);
	}
}

static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;

	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
		return v_ops->get_victim(sbi,
				&(curseg)->next_segno, BG_GC, type, SSR);

	/* For data segments, let's do SSR more intensively */
	for (; type >= CURSEG_HOT_DATA; type--)
		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
						BG_GC, type, SSR))
			return 1;
	return 0;
}

/*
 * Flush out the current segment and replace it with a new one.
 * This function must succeed; otherwise it is a BUG.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (force)
		new_curseg(sbi, type, true);
	else if (type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
		new_curseg(sbi, type, false);
	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, false);

	stat_inc_seg_type(sbi, curseg);
}

static void __allocate_new_segments(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int old_segno;

	old_segno = curseg->segno;
	SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
	locate_dirty_segment(sbi, old_segno);
}

void allocate_new_segments(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
		__allocate_new_segments(sbi, i);
}

static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};

int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
{
	__u64 start = F2FS_BYTES_TO_BLK(range->start);
	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
	unsigned int start_segno, end_segno;
	struct cp_control cpc;

	if (range->minlen > SEGMENT_SIZE(sbi) || start >= MAX_BLKADDR(sbi) ||
						range->len < sbi->blocksize)
		return -EINVAL;

	cpc.trimmed = 0;
	if (end <= MAIN_BLKADDR(sbi))
		goto out;

	/* start/end segment number in main_area */
	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
						GET_SEGNO(sbi, end);
	cpc.reason = CP_DISCARD;
	cpc.trim_minlen = F2FS_BYTES_TO_BLK(range->minlen);

	/* do checkpoint to issue discard commands safely */
	for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
		cpc.trim_start = start_segno;
		cpc.trim_end = min_t(unsigned int, rounddown(start_segno +
				BATCHED_TRIM_SEGMENTS(sbi),
				sbi->segs_per_sec) - 1, end_segno);

		mutex_lock(&sbi->gc_mutex);
		write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
out:
	range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
	return 0;
}
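
/*
 * Usage sketch (a hedged userspace example, not part of this file):
 * f2fs_trim_fs() is reached through the generic FITRIM ioctl, e.g.:
 *
 *	#include <fcntl.h>
 *	#include <limits.h>
 *	#include <linux/fs.h>
 *	#include <sys/ioctl.h>
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,
 *		.minlen = 0,
 *	};
 *	int fd = open("/mnt/f2fs", O_RDONLY);
 *	ioctl(fd, FITRIM, &range);	// range.len returns trimmed bytes
 *
 * The batching above bounds how many segments one checkpoint trims,
 * keeping gc_mutex hold times short for large ranges.
 */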

static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	if (curseg->next_blkoff < sbi->blocks_per_seg)
		return true;
	return false;
}

static int __get_segment_type_2(struct page *page, enum page_type p_type)
{
	if (p_type == DATA)
		return CURSEG_HOT_DATA;
	else
		return CURSEG_HOT_NODE;
}

static int __get_segment_type_4(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else
			return CURSEG_COLD_DATA;
	} else {
		if (IS_DNODE(page) && is_cold_node(page))
			return CURSEG_WARM_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type_6(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else if (is_cold_data(page) || file_is_cold(inode))
			return CURSEG_COLD_DATA;
		else
			return CURSEG_WARM_DATA;
	} else {
		if (IS_DNODE(page))
			return is_cold_node(page) ? CURSEG_WARM_NODE :
						CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type(struct page *page, enum page_type p_type)
{
	switch (F2FS_P_SB(page)->active_logs) {
	case 2:
		return __get_segment_type_2(page, p_type);
	case 4:
		return __get_segment_type_4(page, p_type);
	}
	/* NR_CURSEG_TYPE(6) logs by default */
	f2fs_bug_on(F2FS_P_SB(page),
		F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE);
	return __get_segment_type_6(page, p_type);
}
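
/*
 * Example (illustrative, six active logs): __get_segment_type_6()
 * spreads pages by expected lifetime:
 *
 *	directory data          -> CURSEG_HOT_DATA
 *	cold / read-mostly data -> CURSEG_COLD_DATA
 *	ordinary file data      -> CURSEG_WARM_DATA
 *	direct node (cold file) -> CURSEG_WARM_NODE
 *	direct node (hot file)  -> CURSEG_HOT_NODE
 *	indirect node           -> CURSEG_COLD_NODE
 *
 * With active_logs=2 or 4 the mapping collapses as in the two helpers
 * above, trading write locality for fewer open segments.
 */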

void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
		block_t old_blkaddr, block_t *new_blkaddr,
		struct f2fs_summary *sum, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	bool direct_io = (type == CURSEG_DIRECT_IO);

	type = direct_io ? CURSEG_WARM_DATA : type;

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);

	/* direct_io'ed data is aligned to the segment for better performance */
	if (direct_io && curseg->next_blkoff)
		__allocate_new_segments(sbi, type);

	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/*
	 * __add_sum_entry must be called with curseg_mutex held,
	 * because it updates a summary entry in the current
	 * summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	mutex_lock(&sit_i->sentry_lock);
	__refresh_next_blkoff(sbi, curseg);

	stat_inc_block_count(sbi, curseg);

	if (!__has_curseg_space(sbi, type))
		sit_i->s_ops->allocate_segment(sbi, type, false);
	/*
	 * SIT information should be updated before segment allocation,
	 * since SSR needs latest valid block information.
	 */
	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

	mutex_unlock(&sit_i->sentry_lock);

	if (page && IS_NODESEG(type))
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

	mutex_unlock(&curseg->curseg_mutex);
}

static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
			struct f2fs_summary *sum,
			struct f2fs_io_info *fio)
{
	int type = __get_segment_type(page, fio->type);

	allocate_data_block(sbi, page, fio->blk_addr, &fio->blk_addr, sum, type);

	/* writeout dirty page into bdev */
	f2fs_submit_page_mbio(sbi, page, fio);
}

void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_io_info fio = {
		.type = META,
		.rw = WRITE_SYNC | REQ_META | REQ_PRIO,
		.blk_addr = page->index,
	};

	set_page_writeback(page);
	f2fs_submit_page_mbio(sbi, page, &fio);
}

void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
			unsigned int nid, struct f2fs_io_info *fio)
{
	struct f2fs_summary sum;
	set_summary(&sum, nid, 0, 0);
	do_write_page(sbi, page, &sum, fio);
}

void write_data_page(struct page *page, struct dnode_of_data *dn,
				struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;

	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	do_write_page(sbi, page, &sum, fio);
	dn->data_blkaddr = fio->blk_addr;
}

void rewrite_data_page(struct page *page, struct f2fs_io_info *fio)
{
	stat_inc_inplace_blocks(F2FS_P_SB(page));
	f2fs_submit_page_mbio(F2FS_P_SB(page), page, fio);
}

void recover_data_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	struct seg_entry *se;
	int type;

	segno = GET_SEGNO(sbi, new_blkaddr);
	se = get_seg_entry(sbi, segno);
	type = se->type;

	if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
		if (old_blkaddr == NULL_ADDR)
			type = CURSEG_COLD_DATA;
		else
			type = CURSEG_WARM_DATA;
	}
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}

	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
	__add_sum_entry(sbi, type, sum);

	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
	locate_dirty_segment(sbi, old_cursegno);

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

static inline bool is_merged_page(struct f2fs_sb_info *sbi,
					struct page *page, enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	struct bio_vec *bvec;
	int i;

	down_read(&io->io_rwsem);
	if (!io->bio)
		goto out;

	bio_for_each_segment_all(bvec, io->bio, i) {
		if (page == bvec->bv_page) {
			up_read(&io->io_rwsem);
			return true;
		}
	}

out:
	up_read(&io->io_rwsem);
	return false;
}

void f2fs_wait_on_page_writeback(struct page *page,
				enum page_type type)
{
	if (PageWriteback(page)) {
		struct f2fs_sb_info *sbi = F2FS_P_SB(page);

		if (is_merged_page(sbi, page, type))
			f2fs_submit_merged_bio(sbi, type, WRITE);
		wait_on_page_writeback(page);
	}
}

static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *seg_i;
	unsigned char *kaddr;
	struct page *page;
	block_t start;
	int i, j, offset;

	start = start_sum_block(sbi);

	page = get_meta_page(sbi, start++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: restore nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);

	/* Step 2: restore sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
						SUM_JOURNAL_SIZE);
	offset = 2 * SUM_JOURNAL_SIZE;

	/* Step 3: restore summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		unsigned int segno;

		seg_i = CURSEG_I(sbi, i);
		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
		seg_i->next_segno = segno;
		reset_curseg(sbi, i, 0);
		seg_i->alloc_type = ckpt->alloc_type[i];
		seg_i->next_blkoff = blk_off;

		if (seg_i->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;
			s = (struct f2fs_summary *)(kaddr + offset);
			seg_i->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
						SUM_FOOTER_SIZE)
				continue;

			f2fs_put_page(page, 1);
			page = NULL;

			page = get_meta_page(sbi, start++);
			kaddr = (unsigned char *)page_address(page);
			offset = 0;
		}
	}
	f2fs_put_page(page, 1);
	return 0;
}

static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum;
	struct curseg_info *curseg;
	struct page *new;
	unsigned short blk_off;
	unsigned int segno = 0;
	block_t blk_addr = 0;

	/* get segment number and block addr */
	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
							CURSEG_HOT_DATA]);
		if (__exist_node_summaries(sbi))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
							CURSEG_HOT_NODE]);
		if (__exist_node_summaries(sbi))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLOCK(sbi, segno);
	}

	new = get_meta_page(sbi, blk_addr);
	sum = (struct f2fs_summary_block *)page_address(new);

	if (IS_NODESEG(type)) {
		if (__exist_node_summaries(sbi)) {
			struct f2fs_summary *ns = &sum->entries[0];
			int i;
			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
				ns->version = 0;
				ns->ofs_in_node = 0;
			}
		} else {
			int err;

			err = restore_node_summary(sbi, segno, sum);
			if (err) {
				f2fs_put_page(new, 1);
				return err;
			}
		}
	}

	/* restore the incomplete segment as curseg */
	curseg = CURSEG_I(sbi, type);
	mutex_lock(&curseg->curseg_mutex);
	memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 0);
	curseg->alloc_type = ckpt->alloc_type[type];
	curseg->next_blkoff = blk_off;
	mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(new, 1);
	return 0;
}

static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;
	int err;

	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
		int npages = npages_for_summary_flush(sbi, true);

		if (npages >= 2)
			ra_meta_pages(sbi, start_sum_block(sbi), npages,
								META_CP);

		/* restore for compacted data summary */
		if (read_compacted_summaries(sbi))
			return -EINVAL;
		type = CURSEG_HOT_NODE;
	}

	if (__exist_node_summaries(sbi))
		ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
					NR_CURSEG_TYPE - type, META_CP);

	for (; type <= CURSEG_COLD_NODE; type++) {
		err = read_normal_summaries(sbi, type);
		if (err)
			return err;
	}

	return 0;
}

static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
						SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blkoff;
		seg_i = CURSEG_I(sbi, i);
		if (sbi->ckpt->alloc_type[i] == SSR)
			blkoff = sbi->blocks_per_seg;
		else
			blkoff = curseg_blkoff(sbi, i);

		for (j = 0; j < blkoff; j++) {
			if (!page) {
				page = grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;

			if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
							SUM_FOOTER_SIZE)
				continue;

			set_page_dirty(page);
			f2fs_put_page(page, 1);
			page = NULL;
		}
	}
	if (page) {
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;
	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

	for (i = type; i < end; i++) {
		struct curseg_info *sum = CURSEG_I(sbi, i);
		mutex_lock(&sum->curseg_mutex);
		write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
		mutex_unlock(&sum->curseg_mutex);
	}
}

void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}

void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}

int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
					unsigned int val, int alloc)
{
	int i;

	if (type == NAT_JOURNAL) {
		for (i = 0; i < nats_in_cursum(sum); i++) {
			if (le32_to_cpu(nid_in_journal(sum, i)) == val)
				return i;
		}
		if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
			return update_nats_in_cursum(sum, 1);
	} else if (type == SIT_JOURNAL) {
		for (i = 0; i < sits_in_cursum(sum); i++)
			if (le32_to_cpu(segno_in_journal(sum, i)) == val)
				return i;
		if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
			return update_sits_in_cursum(sum, 1);
	}
	return -1;
}
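
/*
 * Usage sketch (hedged): callers either probe or reserve a journal
 * slot. flush_sit_entries() below reserves one per dirty segment:
 *
 *	offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
 *	// offset >= 0: existing entry for segno, or a freshly grown slot
 *	// offset < 0 : journal full; fall back to writing the SIT page
 *
 * With alloc == 0 the call is a pure lookup and -1 simply means "not
 * journalled".
 */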

static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	return get_meta_page(sbi, current_sit_addr(sbi, segno));
}

static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
					unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct page *src_page, *dst_page;
	pgoff_t src_off, dst_off;
	void *src_addr, *dst_addr;

	src_off = current_sit_addr(sbi, start);
	dst_off = next_sit_addr(sbi, src_off);

	/* get current sit block page without lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);

	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_sit(sit_i, start);

	return dst_page;
}

static struct sit_entry_set *grab_sit_entry_set(void)
{
	struct sit_entry_set *ses =
			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_ATOMIC);

	ses->entry_cnt = 0;
	INIT_LIST_HEAD(&ses->set_list);
	return ses;
}

static void release_sit_entry_set(struct sit_entry_set *ses)
{
	list_del(&ses->set_list);
	kmem_cache_free(sit_entry_set_slab, ses);
}

static void adjust_sit_entry_set(struct sit_entry_set *ses,
						struct list_head *head)
{
	struct sit_entry_set *next = ses;

	if (list_is_last(&ses->set_list, head))
		return;

	list_for_each_entry_continue(next, head, set_list)
		if (ses->entry_cnt <= next->entry_cnt)
			break;

	list_move_tail(&ses->set_list, &next->set_list);
}

static void add_sit_entry(unsigned int segno, struct list_head *head)
{
	struct sit_entry_set *ses;
	unsigned int start_segno = START_SEGNO(segno);

	list_for_each_entry(ses, head, set_list) {
		if (ses->start_segno == start_segno) {
			ses->entry_cnt++;
			adjust_sit_entry_set(ses, head);
			return;
		}
	}

	ses = grab_sit_entry_set();

	ses->start_segno = start_segno;
	ses->entry_cnt++;
	list_add(&ses->set_list, head);
}

static void add_sits_in_set(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	struct list_head *set_list = &sm_info->sit_entry_set;
	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
	unsigned int segno;

	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
		add_sit_entry(segno, set_list);
}

static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
		unsigned int segno;
		bool dirtied;

		segno = le32_to_cpu(segno_in_journal(sum, i));
		dirtied = __mark_sit_entry_dirty(sbi, segno);

		if (!dirtied)
			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
	}
	update_sits_in_cursum(sum, -sits_in_cursum(sum));
}

/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct sit_entry_set *ses, *tmp;
	struct list_head *head = &SM_I(sbi)->sit_entry_set;
	bool to_journal = true;
	struct seg_entry *se;

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	/*
	 * temporarily add and account the sit entries marked in the
	 * dirty bitmap in the sit entry set
	 */
	add_sits_in_set(sbi);

	/*
	 * if there is not enough space in the journal to store the dirty
	 * sit entries, remove all journal entries and add and account
	 * them in the sit entry set instead.
	 */
	if (!__has_cursum_space(sum, sit_i->dirty_sentries, SIT_JOURNAL))
		remove_sits_in_journal(sbi);

	if (!sit_i->dirty_sentries)
		goto out;

	/*
	 * there are two steps to flush sit entries:
	 * #1, flush sit entries to journal in current cold data summary block.
	 * #2, flush sit entries to sit page.
	 */
	list_for_each_entry_safe(ses, tmp, head, set_list) {
		struct page *page = NULL;
		struct f2fs_sit_block *raw_sit = NULL;
		unsigned int start_segno = ses->start_segno;
		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
						(unsigned long)MAIN_SEGS(sbi));
		unsigned int segno = start_segno;

		if (to_journal &&
			!__has_cursum_space(sum, ses->entry_cnt, SIT_JOURNAL))
			to_journal = false;

		if (!to_journal) {
			page = get_next_sit_page(sbi, start_segno);
			raw_sit = page_address(page);
		}

		/* flush dirty sit entries in region of current sit set */
		for_each_set_bit_from(segno, bitmap, end) {
			int offset, sit_offset;

			se = get_seg_entry(sbi, segno);

			/* add discard candidates */
			if (cpc->reason != CP_DISCARD) {
				cpc->trim_start = segno;
				add_discard_addrs(sbi, cpc);
			}

			if (to_journal) {
				offset = lookup_journal_in_cursum(sum,
							SIT_JOURNAL, segno, 1);
				f2fs_bug_on(sbi, offset < 0);
				segno_in_journal(sum, offset) =
							cpu_to_le32(segno);
				seg_info_to_raw_sit(se,
						&sit_in_journal(sum, offset));
			} else {
				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
				seg_info_to_raw_sit(se,
						&raw_sit->entries[sit_offset]);
			}

			__clear_bit(segno, bitmap);
			sit_i->dirty_sentries--;
			ses->entry_cnt--;
		}

		if (!to_journal)
			f2fs_put_page(page, 1);

		f2fs_bug_on(sbi, ses->entry_cnt);
		release_sit_entry_set(ses);
	}

	f2fs_bug_on(sbi, !list_empty(head));
	f2fs_bug_on(sbi, sit_i->dirty_sentries);
out:
	if (cpc->reason == CP_DISCARD) {
		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
			add_discard_addrs(sbi, cpc);
	}
	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);

	set_prefree_as_free_segments(sbi);
}
1822 
1823 static int build_sit_info(struct f2fs_sb_info *sbi)
1824 {
1825 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1826 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1827 	struct sit_info *sit_i;
1828 	unsigned int sit_segs, start;
1829 	char *src_bitmap, *dst_bitmap;
1830 	unsigned int bitmap_size;
1831 
1832 	/* allocate memory for SIT information */
1833 	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
1834 	if (!sit_i)
1835 		return -ENOMEM;
1836 
1837 	SM_I(sbi)->sit_info = sit_i;
1838 
1839 	sit_i->sentries = vzalloc(MAIN_SEGS(sbi) * sizeof(struct seg_entry));
1840 	if (!sit_i->sentries)
1841 		return -ENOMEM;
1842 
1843 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
1844 	sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
1845 	if (!sit_i->dirty_sentries_bitmap)
1846 		return -ENOMEM;
1847 
1848 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
1849 		sit_i->sentries[start].cur_valid_map
1850 			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1851 		sit_i->sentries[start].ckpt_valid_map
1852 			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1853 		if (!sit_i->sentries[start].cur_valid_map
1854 				|| !sit_i->sentries[start].ckpt_valid_map)
1855 			return -ENOMEM;
1856 	}
1857 
1858 	sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1859 	if (!sit_i->tmp_map)
1860 		return -ENOMEM;
1861 
1862 	if (sbi->segs_per_sec > 1) {
1863 		sit_i->sec_entries = vzalloc(MAIN_SECS(sbi) *
1864 					sizeof(struct sec_entry));
1865 		if (!sit_i->sec_entries)
1866 			return -ENOMEM;
1867 	}
1868 
1869 	/* get information related to SIT */
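	/* segment_count_sit covers both on-disk SIT copies, hence the halving */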
1870 	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
1871 
1872 	/* set up SIT bitmap from checkpoint pack */
1873 	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
1874 	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
1875 
1876 	dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
1877 	if (!dst_bitmap)
1878 		return -ENOMEM;
1879 
1880 	/* init SIT information */
1881 	sit_i->s_ops = &default_salloc_ops;
1882 
1883 	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
1884 	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
1885 	sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
1886 	sit_i->sit_bitmap = dst_bitmap;
1887 	sit_i->bitmap_size = bitmap_size;
1888 	sit_i->dirty_sentries = 0;
1889 	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
1890 	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
1891 	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
1892 	mutex_init(&sit_i->sentry_lock);
1893 	return 0;
1894 }
1895 
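/*
 * Build the free segment/section bitmaps.  Every bit starts set (not
 * free); init_free_segmap() clears bits later, once the loaded SIT
 * entries show which segments hold no valid blocks.
 */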
1896 static int build_free_segmap(struct f2fs_sb_info *sbi)
1897 {
1898 	struct free_segmap_info *free_i;
1899 	unsigned int bitmap_size, sec_bitmap_size;
1900 
1901 	/* allocate memory for free segmap information */
1902 	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
1903 	if (!free_i)
1904 		return -ENOMEM;
1905 
1906 	SM_I(sbi)->free_info = free_i;
1907 
1908 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
1909 	free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
1910 	if (!free_i->free_segmap)
1911 		return -ENOMEM;
1912 
1913 	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
1914 	free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
1915 	if (!free_i->free_secmap)
1916 		return -ENOMEM;
1917 
1918 	/* set all segments as dirty temporarily */
1919 	memset(free_i->free_segmap, 0xff, bitmap_size);
1920 	memset(free_i->free_secmap, 0xff, sec_bitmap_size);
1921 
1922 	/* init free segmap information */
1923 	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
1924 	free_i->free_segments = 0;
1925 	free_i->free_sections = 0;
1926 	spin_lock_init(&free_i->segmap_lock);
1927 	return 0;
1928 }
1929 
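/*
 * Allocate the array of current (active) segments, one per log type,
 * then restore their summary blocks and positions from the checkpoint
 * via restore_curseg_summaries().
 */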
1930 static int build_curseg(struct f2fs_sb_info *sbi)
1931 {
1932 	struct curseg_info *array;
1933 	int i;
1934 
1935 	array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
1936 	if (!array)
1937 		return -ENOMEM;
1938 
1939 	SM_I(sbi)->curseg_array = array;
1940 
1941 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
1942 		mutex_init(&array[i].curseg_mutex);
1943 		array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
1944 		if (!array[i].sum_blk)
1945 			return -ENOMEM;
1946 		array[i].segno = NULL_SEGNO;
1947 		array[i].next_blkoff = 0;
1948 	}
1949 	return restore_curseg_summaries(sbi);
1950 }
1951 
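/*
 * Load the on-disk SIT into sit_i->sentries.  An entry may live in two
 * places: the SIT journal kept in the cold data summary block (newer)
 * or the SIT area proper; the journal copy, if any, wins.
 */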
1952 static void build_sit_entries(struct f2fs_sb_info *sbi)
1953 {
1954 	struct sit_info *sit_i = SIT_I(sbi);
1955 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1956 	struct f2fs_summary_block *sum = curseg->sum_blk;
1957 	int sit_blk_cnt = SIT_BLK_CNT(sbi);
1958 	unsigned int i, start, end;
1959 	unsigned int readed, start_blk = 0;
1960 	int nrpages = MAX_BIO_BLOCKS(sbi);
1961 
1962 	do {
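		/* readahead up to one bio's worth of SIT blocks at a time */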
1963 		readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT);
1964 
1965 		start = start_blk * sit_i->sents_per_block;
1966 		end = (start_blk + readed) * sit_i->sents_per_block;
1967 
1968 		for (; start < end && start < MAIN_SEGS(sbi); start++) {
1969 			struct seg_entry *se = &sit_i->sentries[start];
1970 			struct f2fs_sit_block *sit_blk;
1971 			struct f2fs_sit_entry sit;
1972 			struct page *page;
1973 
1974 			mutex_lock(&curseg->curseg_mutex);
1975 			for (i = 0; i < sits_in_cursum(sum); i++) {
1976 				if (le32_to_cpu(segno_in_journal(sum, i))
1977 								== start) {
1978 					sit = sit_in_journal(sum, i);
1979 					mutex_unlock(&curseg->curseg_mutex);
1980 					goto got_it;
1981 				}
1982 			}
1983 			mutex_unlock(&curseg->curseg_mutex);
1984 
1985 			page = get_current_sit_page(sbi, start);
1986 			sit_blk = (struct f2fs_sit_block *)page_address(page);
1987 			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
1988 			f2fs_put_page(page, 1);
1989 got_it:
1990 			check_block_count(sbi, start, &sit);
1991 			seg_info_from_raw_sit(se, &sit);
1992 			if (sbi->segs_per_sec > 1) {
1993 				struct sec_entry *e = get_sec_entry(sbi, start);
1994 				e->valid_blocks += se->valid_blocks;
1995 			}
1996 		}
1997 		start_blk += readed;
1998 	} while (start_blk < sit_blk_cnt);
1999 }
2000 
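/*
 * Mark every segment with no valid blocks as free, then flag the
 * current segments as in-use so the allocator will not hand them out.
 */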
2001 static void init_free_segmap(struct f2fs_sb_info *sbi)
2002 {
2003 	unsigned int start;
2004 	int type;
2005 
2006 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
2007 		struct seg_entry *sentry = get_seg_entry(sbi, start);
2008 		if (!sentry->valid_blocks)
2009 			__set_free(sbi, start);
2010 	}
2011 
2012 	/* mark the current segments as in-use */
2013 	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
2014 		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
2015 		__set_test_and_inuse(sbi, curseg_t->segno);
2016 	}
2017 }
2018 
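/*
 * A segment is dirty when it is in use but only partially valid:
 * neither fully written (valid_blocks == blocks_per_seg) nor empty.
 */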
2019 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
2020 {
2021 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2022 	struct free_segmap_info *free_i = FREE_I(sbi);
2023 	unsigned int segno = 0, offset = 0;
2024 	unsigned short valid_blocks;
2025 
2026 	while (1) {
2027 		/* find dirty segment based on free segmap */
2028 		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
2029 		if (segno >= MAIN_SEGS(sbi))
2030 			break;
2031 		offset = segno + 1;
2032 		valid_blocks = get_valid_blocks(sbi, segno, 0);
2033 		if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
2034 			continue;
2035 		if (valid_blocks > sbi->blocks_per_seg) {
2036 			f2fs_bug_on(sbi, 1);
2037 			continue;
2038 		}
2039 		mutex_lock(&dirty_i->seglist_lock);
2040 		__locate_dirty_segment(sbi, segno, DIRTY);
2041 		mutex_unlock(&dirty_i->seglist_lock);
2042 	}
2043 }
2044 
2045 static int init_victim_secmap(struct f2fs_sb_info *sbi)
2046 {
2047 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2048 	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
2049 
2050 	dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
2051 	if (!dirty_i->victim_secmap)
2052 		return -ENOMEM;
2053 	return 0;
2054 }
2055 
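/*
 * Allocate one bitmap per dirty segment type, seed the plain DIRTY
 * map from the SIT via init_dirty_segmap(), and set up the victim
 * section map consulted by the garbage collector.
 */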
2056 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
2057 {
2058 	struct dirty_seglist_info *dirty_i;
2059 	unsigned int bitmap_size, i;
2060 
2061 	/* allocate memory for dirty segment list information */
2062 	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
2063 	if (!dirty_i)
2064 		return -ENOMEM;
2065 
2066 	SM_I(sbi)->dirty_info = dirty_i;
2067 	mutex_init(&dirty_i->seglist_lock);
2068 
2069 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
2070 
2071 	for (i = 0; i < NR_DIRTY_TYPE; i++) {
2072 		dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
2073 		if (!dirty_i->dirty_segmap[i])
2074 			return -ENOMEM;
2075 	}
2076 
2077 	init_dirty_segmap(sbi);
2078 	return init_victim_secmap(sbi);
2079 }
2080 
2081 /*
2082  * Update min, max modified time for cost-benefit GC algorithm
2083  */
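/*
 * A section's mtime is the mean of its segments': with segs_per_sec
 * == 2 and segment mtimes of 100 and 300, the section contributes 200
 * to the min_mtime scan below.
 */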
2084 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
2085 {
2086 	struct sit_info *sit_i = SIT_I(sbi);
2087 	unsigned int segno;
2088 
2089 	mutex_lock(&sit_i->sentry_lock);
2090 
2091 	sit_i->min_mtime = LLONG_MAX;
2092 
2093 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
2094 		unsigned int i;
2095 		unsigned long long mtime = 0;
2096 
2097 		for (i = 0; i < sbi->segs_per_sec; i++)
2098 			mtime += get_seg_entry(sbi, segno + i)->mtime;
2099 
2100 		mtime = div_u64(mtime, sbi->segs_per_sec);
2101 
2102 		if (sit_i->min_mtime > mtime)
2103 			sit_i->min_mtime = mtime;
2104 	}
2105 	sit_i->max_mtime = get_mtime(sbi);
2106 	mutex_unlock(&sit_i->sentry_lock);
2107 }
2108 
2109 int build_segment_manager(struct f2fs_sb_info *sbi)
2110 {
2111 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2112 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2113 	struct f2fs_sm_info *sm_info;
2114 	int err;
2115 
2116 	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
2117 	if (!sm_info)
2118 		return -ENOMEM;
2119 
2120 	/* init sm info */
2121 	sbi->sm_info = sm_info;
2122 	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
2123 	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
2124 	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
2125 	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
2126 	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
2127 	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
2128 	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
2129 	sm_info->rec_prefree_segments = sm_info->main_segments *
2130 					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
2131 	sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
2132 	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
2133 	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
2134 
2135 	INIT_LIST_HEAD(&sm_info->discard_list);
2136 	sm_info->nr_discards = 0;
2137 	sm_info->max_discards = 0;
2138 
2139 	sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
2140 
2141 	INIT_LIST_HEAD(&sm_info->sit_entry_set);
2142 
2143 	if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
2144 		err = create_flush_cmd_control(sbi);
2145 		if (err)
2146 			return err;
2147 	}
2148 
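	/*
	 * Ordering matters below: build_curseg() must run before
	 * build_sit_entries(), since the SIT journal is read out of the
	 * cold data summary block, and the free/dirty segmaps are then
	 * initialized from the loaded SIT entries.
	 */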
2149 	err = build_sit_info(sbi);
2150 	if (err)
2151 		return err;
2152 	err = build_free_segmap(sbi);
2153 	if (err)
2154 		return err;
2155 	err = build_curseg(sbi);
2156 	if (err)
2157 		return err;
2158 
2159 	/* reinit free segmap based on SIT */
2160 	build_sit_entries(sbi);
2161 
2162 	init_free_segmap(sbi);
2163 	err = build_dirty_segmap(sbi);
2164 	if (err)
2165 		return err;
2166 
2167 	init_min_max_mtime(sbi);
2168 	return 0;
2169 }
2170 
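/*
 * Mount-time pairing (sketch; the actual call sites are in
 * fs/f2fs/super.c):
 *
 *	err = build_segment_manager(sbi);
 *	if (err)
 *		goto free_sm;
 *	...
 * free_sm:
 *	destroy_segment_manager(sbi);
 */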
2171 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
2172 		enum dirty_type dirty_type)
2173 {
2174 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2175 
2176 	mutex_lock(&dirty_i->seglist_lock);
2177 	kfree(dirty_i->dirty_segmap[dirty_type]);
2178 	dirty_i->nr_dirty[dirty_type] = 0;
2179 	mutex_unlock(&dirty_i->seglist_lock);
2180 }
2181 
2182 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
2183 {
2184 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2185 	kfree(dirty_i->victim_secmap);
2186 }
2187 
2188 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
2189 {
2190 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2191 	int i;
2192 
2193 	if (!dirty_i)
2194 		return;
2195 
2196 	/* discard pre-free/dirty segments list */
2197 	for (i = 0; i < NR_DIRTY_TYPE; i++)
2198 		discard_dirty_segmap(sbi, i);
2199 
2200 	destroy_victim_secmap(sbi);
2201 	SM_I(sbi)->dirty_info = NULL;
2202 	kfree(dirty_i);
2203 }
2204 
2205 static void destroy_curseg(struct f2fs_sb_info *sbi)
2206 {
2207 	struct curseg_info *array = SM_I(sbi)->curseg_array;
2208 	int i;
2209 
2210 	if (!array)
2211 		return;
2212 	SM_I(sbi)->curseg_array = NULL;
2213 	for (i = 0; i < NR_CURSEG_TYPE; i++)
2214 		kfree(array[i].sum_blk);
2215 	kfree(array);
2216 }
2217 
2218 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
2219 {
2220 	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
2221 	if (!free_i)
2222 		return;
2223 	SM_I(sbi)->free_info = NULL;
2224 	kfree(free_i->free_segmap);
2225 	kfree(free_i->free_secmap);
2226 	kfree(free_i);
2227 }
2228 
2229 static void destroy_sit_info(struct f2fs_sb_info *sbi)
2230 {
2231 	struct sit_info *sit_i = SIT_I(sbi);
2232 	unsigned int start;
2233 
2234 	if (!sit_i)
2235 		return;
2236 
2237 	if (sit_i->sentries) {
2238 		for (start = 0; start < MAIN_SEGS(sbi); start++) {
2239 			kfree(sit_i->sentries[start].cur_valid_map);
2240 			kfree(sit_i->sentries[start].ckpt_valid_map);
2241 		}
2242 	}
2243 	kfree(sit_i->tmp_map);
2244 
2245 	vfree(sit_i->sentries);
2246 	vfree(sit_i->sec_entries);
2247 	kfree(sit_i->dirty_sentries_bitmap);
2248 
2249 	SM_I(sbi)->sit_info = NULL;
2250 	kfree(sit_i->sit_bitmap);
2251 	kfree(sit_i);
2252 }
2253 
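/*
 * Teardown runs in roughly the reverse of build_segment_manager():
 * flush-merge control first, then the dirty, curseg, free and SIT
 * structures, before the sm_info itself is freed.
 */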
2254 void destroy_segment_manager(struct f2fs_sb_info *sbi)
2255 {
2256 	struct f2fs_sm_info *sm_info = SM_I(sbi);
2257 
2258 	if (!sm_info)
2259 		return;
2260 	destroy_flush_cmd_control(sbi);
2261 	destroy_dirty_segmap(sbi);
2262 	destroy_curseg(sbi);
2263 	destroy_free_segmap(sbi);
2264 	destroy_sit_info(sbi);
2265 	sbi->sm_info = NULL;
2266 	kfree(sm_info);
2267 }
2268 
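/*
 * Slab caches backing the discard, SIT-set and in-memory page entries
 * used above.  Module init/exit pairing (sketch; the real call sites
 * are in f2fs's module init path):
 *
 *	err = create_segment_manager_caches();
 *	if (err)
 *		return err;
 *	...
 *	destroy_segment_manager_caches();
 */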
2269 int __init create_segment_manager_caches(void)
2270 {
2271 	discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
2272 			sizeof(struct discard_entry));
2273 	if (!discard_entry_slab)
2274 		goto fail;
2275 
2276 	sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
2277 			sizeof(struct sit_entry_set));
2278 	if (!sit_entry_set_slab)
2279 		goto destroy_discard_entry;
2280 
2281 	inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
2282 			sizeof(struct inmem_pages));
2283 	if (!inmem_entry_slab)
2284 		goto destroy_sit_entry_set;
2285 	return 0;
2286 
2287 destroy_sit_entry_set:
2288 	kmem_cache_destroy(sit_entry_set_slab);
2289 destroy_discard_entry:
2290 	kmem_cache_destroy(discard_entry_slab);
2291 fail:
2292 	return -ENOMEM;
2293 }
2294 
2295 void destroy_segment_manager_caches(void)
2296 {
2297 	kmem_cache_destroy(sit_entry_set_slab);
2298 	kmem_cache_destroy(discard_entry_slab);
2299 	kmem_cache_destroy(inmem_entry_slab);
2300 }
2301