/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff) == 0) {
		num += 32;
		word >>= 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		num += 16;
		word >>= 16;
	}
	if ((word & 0xff) == 0) {
		num += 8;
		word >>= 8;
	}
	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;
	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;
	if ((word & 0x2) == 0)
		num += 1;
	return num;
}
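
/*
 * Worked example of the fold above (illustrative only): in the f2fs
 * bitmap convention, bit 0 of a byte lives at the machine MSB, so the
 * nibble checks are mirrored relative to the generic __ffs.  For
 * word == 0x01 (machine bit 0 set, i.e. f2fs bit 7 of byte 0):
 *
 *   (0x01 & 0xf0) == 0  ->  num = 4
 *   (0x01 & 0x0c) == 0  ->  num = 6
 *   (0x01 & 0x02) == 0  ->  num = 7
 *
 * so __reverse_ffs(0x01) returns 7, while __reverse_ffs(0x80) takes
 * the "else" shift branches all the way down and returns 0.
 */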

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * Example:
 *                             LSB <--> MSB
 *   f2fs_set_bit(0, bitmap) => 0000 0001
 *   f2fs_set_bit(7, bitmap) => 1000 0000
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	/* bound the scan: an all-zero map must not run past the bitmap */
	while (offset < size && !f2fs_test_bit(offset, (unsigned char *)addr))
		offset++;

	if (offset > size)
		offset = size;

	return offset;
#if 0
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;
	unsigned long mask, submask;
	unsigned long quot, rest;

	if (offset >= size)
		return size;

	size -= result;
	offset %= BITS_PER_LONG;
	if (!offset)
		goto aligned;

	tmp = *(p++);
	quot = (offset >> 3) << 3;
	rest = offset & 0x7;
	mask = ~0UL << quot;
	submask = (unsigned char)(0xff << rest) >> rest;
	submask <<= quot;
	mask &= submask;
	tmp &= mask;
	if (size < BITS_PER_LONG)
		goto found_first;
	if (tmp)
		goto found_middle;

	size -= BITS_PER_LONG;
	result += BITS_PER_LONG;
aligned:
	while (size & ~(BITS_PER_LONG-1)) {
		tmp = *(p++);
		if (tmp)
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp &= (~0UL >> (BITS_PER_LONG - size));
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;   /* Nope. */
found_middle:
	return result + __reverse_ffs(tmp);
#endif
}

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	/* bound the scan: an all-ones map must not run past the bitmap */
	while (offset < size && f2fs_test_bit(offset, (unsigned char *)addr))
		offset++;

	if (offset > size)
		offset = size;

	return offset;
#if 0
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;
	unsigned long mask, submask;
	unsigned long quot, rest;

	if (offset >= size)
		return size;

	size -= result;
	offset %= BITS_PER_LONG;
	if (!offset)
		goto aligned;

	tmp = *(p++);
	quot = (offset >> 3) << 3;
	rest = offset & 0x7;
	mask = ~(~0UL << quot);
	submask = (unsigned char)~((unsigned char)(0xff << rest) >> rest);
	submask <<= quot;
	mask += submask;
	tmp |= mask;
	if (size < BITS_PER_LONG)
		goto found_first;
	if (~tmp)
		goto found_middle;

	size -= BITS_PER_LONG;
	result += BITS_PER_LONG;
aligned:
	while (size & ~(BITS_PER_LONG - 1)) {
		tmp = *(p++);
		if (~tmp)
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)        /* Are any bits zero? */
		return result + size;   /* Nope. */
found_middle:
	return result + __reverse_ffz(tmp);
#endif
}
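
/*
 * Together these two helpers let callers walk runs of set bits in the
 * byte-reversed bitmaps.  A minimal sketch of the idiom (this is the
 * same loop shape add_discard_addrs() uses further below; names here
 * are illustrative):
 *
 *	unsigned long start, end = -1;
 *
 *	while (1) {
 *		start = __find_rev_next_bit(map, size, end + 1);
 *		if (start >= size)
 *			break;
 *		end = __find_rev_next_zero_bit(map, size, start + 1);
 *		... [start, end) is one contiguous run of set bits ...
 *	}
 */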

void register_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *new;
	int err;

	SetPagePrivate(page);
	f2fs_trace_pid(page);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);
retry:
	/* increase reference count with clean state */
	mutex_lock(&fi->inmem_lock);
	err = radix_tree_insert(&fi->inmem_root, page->index, new);
	if (err == -EEXIST) {
		mutex_unlock(&fi->inmem_lock);
		kmem_cache_free(inmem_entry_slab, new);
		return;
	} else if (err) {
		mutex_unlock(&fi->inmem_lock);
		goto retry;
	}
	get_page(page);
	list_add_tail(&new->list, &fi->inmem_pages);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&fi->inmem_lock);

	trace_f2fs_register_inmem_page(page, INMEM);
}
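
/*
 * Typical lifecycle of the pages registered above (a sketch, not a
 * normative description): an application issues
 * F2FS_IOC_START_ATOMIC_WRITE on a file, after which dirtied pages are
 * parked on fi->inmem_pages instead of being written back;
 * F2FS_IOC_COMMIT_ATOMIC_WRITE then drives commit_inmem_pages(inode,
 * false) to flush them as one batch, while evicting the inode drops
 * them via commit_inmem_pages(inode, true).
 */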

void commit_inmem_pages(struct inode *inode, bool abort)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	bool submit_bio = false;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
		.encrypted_page = NULL,
	};

	/*
	 * abort is true only when f2fs_evict_inode is called.
	 * Basically, f2fs_evict_inode doesn't produce any data writes, so
	 * we don't need to call f2fs_balance_fs.
	 * Otherwise, f2fs_gc in f2fs_balance_fs could wait forever until
	 * this inode is freed by iget_locked in f2fs_iget.
	 */
	if (!abort) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);
	}

	mutex_lock(&fi->inmem_lock);
	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		if (!abort) {
			lock_page(cur->page);
			if (cur->page->mapping == inode->i_mapping) {
				set_page_dirty(cur->page);
				f2fs_wait_on_page_writeback(cur->page, DATA);
				if (clear_page_dirty_for_io(cur->page))
					inode_dec_dirty_pages(inode);
				trace_f2fs_commit_inmem_page(cur->page, INMEM);
				fio.page = cur->page;
				do_write_data_page(&fio);
				submit_bio = true;
			}
			f2fs_put_page(cur->page, 1);
		} else {
			trace_f2fs_commit_inmem_page(cur->page, INMEM_DROP);
			put_page(cur->page);
		}
		radix_tree_delete(&fi->inmem_root, cur->page->index);
		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	mutex_unlock(&fi->inmem_lock);

	if (!abort) {
		f2fs_unlock_op(sbi);
		if (submit_bio)
			f2fs_submit_merged_bio(sbi, DATA, WRITE);
	}
}

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi)
{
	/*
	 * We should do GC or end up with checkpoint, if there are too many
	 * dirty dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi);
	}
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* try to shrink the extent cache when there is not enough memory */
	f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries and prefree segments */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
			excess_prefree_segs(sbi) ||
			!available_free_memory(sbi, INO_ENTRIES))
		f2fs_sync_fs(sbi->sb, true);
}

static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&fcc->issue_list)) {
		struct bio *bio = bio_alloc(GFP_NOIO, 0);
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		bio->bi_bdev = sbi->sb->s_bdev;
		ret = submit_bio_wait(WRITE_FLUSH, bio);

		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		bio_put(bio);
		fcc->dispatch_list = NULL;
	}

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}

int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
	struct flush_cmd cmd;

	trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER),
					test_opt(sbi, FLUSH_MERGE));

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE))
		return blkdev_issue_flush(sbi->sb->s_bdev, GFP_KERNEL, NULL);

	init_completion(&cmd.wait);

	llist_add(&cmd.llnode, &fcc->issue_list);

	if (!fcc->dispatch_list)
		wake_up(&fcc->flush_wait_queue);

	wait_for_completion(&cmd.wait);

	return cmd.ret;
}
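
/*
 * A rough timeline of the FLUSH_MERGE path above (illustrative):
 *
 *   fsync A --> llist_add --> wait_for_completion() \
 *   fsync B --> llist_add --> wait_for_completion()  > one WRITE_FLUSH
 *   fsync C --> llist_add --> wait_for_completion() /
 *
 * issue_flush_thread() drains the whole issue_list, submits a single
 * empty WRITE_FLUSH bio, and completes every queued waiter with the
 * same return code, so N concurrent fsyncs cost one cache flush.
 */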

int create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->cmd_control_info = fcc;
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kfree(fcc);
		SM_I(sbi)->cmd_control_info = NULL;
		return err;
	}

	return err;
}

void destroy_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;

	if (fcc && fcc->f2fs_issue_flush)
		kthread_stop(fcc->f2fs_issue_flush);
	kfree(fcc);
	SM_I(sbi)->cmd_control_info = NULL;
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
			clear_bit(GET_SECNO(sbi, segno),
						dirty_i->victim_secmap);
	}
}

/*
 * Errors such as -ENOMEM should not occur here.
 * Adding a dirty entry to the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, 0);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}
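
/*
 * Summary of the transitions above, keyed by valid block count
 * (for a segment that is not a current segment):
 *
 *   valid_blocks == 0                 : PRE   (prefree, ready to free)
 *   0 < valid_blocks < blocks_per_seg : DIRTY (candidate for cleaning)
 *   valid_blocks == blocks_per_seg    : neither (fully valid)
 */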

static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = SECTOR_FROM_BLOCK(blkstart);
	sector_t len = SECTOR_FROM_BLOCK(blklen);
	struct seg_entry *se;
	unsigned int offset;
	block_t i;

	for (i = blkstart; i < blkstart + blklen; i++) {
		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
		offset = GET_BLKOFF_FROM_SEG0(sbi, i);

		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	}
	trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
	return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
}

void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int err = -ENOTSUPP;

	if (test_opt(sbi, DISCARD)) {
		struct seg_entry *se = get_seg_entry(sbi,
				GET_SEGNO(sbi, blkaddr));
		unsigned int offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (f2fs_test_bit(offset, se->discard_map))
			return;

		err = f2fs_issue_discard(sbi, blkaddr, 1);
	}

	if (err)
		update_meta_page(sbi, NULL, blkaddr);
}

static void __add_discard_entry(struct f2fs_sb_info *sbi,
		struct cp_control *cpc, struct seg_entry *se,
		unsigned int start, unsigned int end)
{
	struct list_head *head = &SM_I(sbi)->discard_list;
	struct discard_entry *new, *last;

	if (!list_empty(head)) {
		last = list_last_entry(head, struct discard_entry, list);
		if (START_BLOCK(sbi, cpc->trim_start) + start ==
						last->blkaddr + last->len) {
			last->len += end - start;
			goto done;
		}
	}

	new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
	INIT_LIST_HEAD(&new->list);
	new->blkaddr = START_BLOCK(sbi, cpc->trim_start) + start;
	new->len = end - start;
	list_add_tail(&new->list, head);
done:
	SM_I(sbi)->nr_discards += end - start;
}

static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	int max_blocks = sbi->blocks_per_seg;
	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *discard_map = (unsigned long *)se->discard_map;
	unsigned long *dmap = SIT_I(sbi)->tmp_map;
	unsigned int start = 0, end = -1;
	bool force = (cpc->reason == CP_DISCARD);
	int i;

	if (se->valid_blocks == max_blocks)
		return;

	if (!force) {
		if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
		    SM_I(sbi)->nr_discards >= SM_I(sbi)->max_discards)
			return;
	}

	/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
	for (i = 0; i < entries; i++)
		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

	while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
		if (start >= max_blocks)
			break;

		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
		__add_discard_entry(sbi, cpc, se, start, end);
	}
}
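
/*
 * Worked example for the dmap computation above (illustrative, 4-bit
 * maps): cur_map = 0110, ckpt_map = 1110.  In the normal case,
 * (cur ^ ckpt) & ckpt = 1000 & 1110 = 1000, i.e. the only discard
 * candidate is the block that was valid at the last checkpoint but has
 * been invalidated since; blocks that were never checkpointed are
 * skipped.  With force (CP_DISCARD), ~ckpt & ~discard instead picks
 * every block that is both unused and not yet discarded.
 */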

void release_discard_addrs(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->discard_list);
	struct discard_entry *entry, *this;

	/* drop caches */
	list_for_each_entry_safe(entry, this, head, list) {
		list_del(&entry->list);
		kmem_cache_free(discard_entry_slab, entry);
	}
}

/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
		__set_test_and_free(sbi, segno);
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct list_head *head = &(SM_I(sbi)->discard_list);
	struct discard_entry *entry, *this;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int start = 0, end = -1;

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;
		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
		if (start >= MAIN_SEGS(sbi))
			break;
		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
								start + 1);

		for (i = start; i < end; i++)
			clear_bit(i, prefree_map);

		dirty_i->nr_dirty[PRE] -= end - start;

		if (!test_opt(sbi, DISCARD))
			continue;

		f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
				(end - start) << sbi->log_blocks_per_seg);
	}
	mutex_unlock(&dirty_i->seglist_lock);

	/* send small discards */
	list_for_each_entry_safe(entry, this, head, list) {
		if (cpc->reason == CP_DISCARD && entry->len < cpc->trim_minlen)
			goto skip;
		f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
		cpc->trimmed += entry->len;
skip:
		list_del(&entry->list);
		SM_I(sbi)->nr_discards -= entry->len;
		kmem_cache_free(discard_entry_slab, entry);
	}
}

static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);

	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
		sit_i->dirty_sentries++;
		return false;
	}

	return true;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);
	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		if (f2fs_test_and_set_bit(offset, se->cur_valid_map))
			f2fs_bug_on(sbi, 1);
		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	} else {
		if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map))
			f2fs_bug_on(sbi, 1);
		if (f2fs_test_and_clear_bit(offset, se->discard_map))
			sbi->discard_blks++;
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}

void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
{
	update_sit_entry(sbi, new, 1);
	if (GET_SEGNO(sbi, old) != NULL_SEGNO)
		update_sit_entry(sbi, old, -1);

	locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
	locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
}

void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	f2fs_bug_on(sbi, addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	mutex_lock(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	mutex_unlock(&sit_i->sentry_lock);
}

/*
 * This function must be called with curseg_mutex held.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;
	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else {
			if (for_ra)
				valid_sum_count += le16_to_cpu(
					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
			else
				valid_sum_count += curseg_blkoff(sbi, i);
		}
	}

	sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
	if (valid_sum_count <= sum_in_page)
		return 1;
	else if ((valid_sum_count - sum_in_page) <=
		(PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
		return 2;
	return 3;
}
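
/*
 * Rough arithmetic behind the 1/2/3 result above (assuming 4 KiB pages
 * and the on-disk constants from f2fs.h: 7-byte summary entries, two
 * journal areas and a footer sharing the first block): the first page
 * holds roughly 500 entries, later pages slightly more since they
 * carry no journal.  Three current data segments of at most
 * blocks_per_seg entries each therefore always compact into at most
 * three meta pages.
 */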

/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *dst = page_address(page);

	if (src)
		memcpy(dst, src, PAGE_CACHE_SIZE);
	else
		memset(dst, 0, PAGE_CACHE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	update_meta_page(sbi, (void *)sum_blk, blk_addr);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}

/*
 * Find a new segment in the free segment bitmap, scanning in the right order.
 * This function must succeed; otherwise it is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = *newseg / sbi->segs_per_sec;
	unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	spin_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
					MAIN_SEGS(sbi), *newseg + 1);
		if (segno - *newseg < sbi->segs_per_sec -
					(*newseg % sbi->segs_per_sec))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
	if (secno >= MAIN_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	hint = secno;
	segno = secno * sbi->segs_per_sec;
	zoneno = secno / sbi->secs_per_zone;

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	spin_unlock(&free_i->segmap_lock);
}
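
/*
 * Search order of get_new_segment(), in words: first try to stay in
 * the current section (!new_sec); then scan free_secmap from the hint
 * to the right, wrapping once for ALLOC_RIGHT or walking left for
 * ALLOC_LEFT; finally, if the chosen zone already hosts one of the
 * current segments, move the hint past that zone and retry
 * (init == false gives up on zone separation on the second pass).
 */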

static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}

static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	unsigned long *target_map = SIT_I(sbi)->tmp_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	int i, pos;

	for (i = 0; i < entries; i++)
		target_map[i] = ckpt_map[i] | cur_map[i];

	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);

	seg->next_blkoff = pos;
}

/*
 * If a segment is written in LFS manner, the next block offset is simply
 * obtained by increasing the current block offset. However, if a segment is
 * written in SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff.
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}
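
/*
 * Example (illustrative): if blocks 0-2 and 4 of an SSR segment are
 * already valid in cur_valid_map | ckpt_valid_map, then after writing
 * at offset 3, __next_free_blkoff(..., 4) skips the still-valid block
 * 4 and lands on offset 5, whereas an LFS segment would simply advance
 * 3 -> 4.
 */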

/*
 * This function always allocates a used segment (from the dirty seglist) in
 * SSR manner, so it should recover the existing segment information of
 * valid blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	if (reuse) {
		sum_page = get_sum_page(sbi, new_segno);
		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
		f2fs_put_page(sum_page, 1);
	}
}

static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;

	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
		return v_ops->get_victim(sbi,
				&(curseg)->next_segno, BG_GC, type, SSR);

	/* For data segments, let's do SSR more intensively */
	for (; type >= CURSEG_HOT_DATA; type--)
		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
						BG_GC, type, SSR))
			return 1;
	return 0;
}

/*
 * Flush out the current segment and replace it with a new segment.
 * This function must succeed; otherwise it is a BUG.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (force)
		new_curseg(sbi, type, true);
	else if (type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
		new_curseg(sbi, type, false);
	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, false);

	stat_inc_seg_type(sbi, curseg);
}

static void __allocate_new_segments(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int old_segno;

	old_segno = curseg->segno;
	SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
	locate_dirty_segment(sbi, old_segno);
}

void allocate_new_segments(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
		__allocate_new_segments(sbi, i);
}

static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};

int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
{
	__u64 start = F2FS_BYTES_TO_BLK(range->start);
	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
	unsigned int start_segno, end_segno;
	struct cp_control cpc;

	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
		return -EINVAL;

	cpc.trimmed = 0;
	if (end <= MAIN_BLKADDR(sbi))
		goto out;

	/* start/end segment number in main_area */
	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
						GET_SEGNO(sbi, end);
	cpc.reason = CP_DISCARD;
	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));

	/* do checkpoint to issue discard commands safely */
	for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
		cpc.trim_start = start_segno;

		if (sbi->discard_blks == 0)
			break;
		else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
			cpc.trim_end = end_segno;
		else
			cpc.trim_end = min_t(unsigned int,
				rounddown(start_segno +
				BATCHED_TRIM_SEGMENTS(sbi),
				sbi->segs_per_sec) - 1, end_segno);

		mutex_lock(&sbi->gc_mutex);
		write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
out:
	range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
	return 0;
}
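
/*
 * f2fs_trim_fs() is reached through the generic FITRIM ioctl.  A
 * minimal userspace sketch (error handling omitted):
 *
 *	struct fstrim_range range = {
 *		.start  = 0,
 *		.len    = ULLONG_MAX,
 *		.minlen = 0,
 *	};
 *	ioctl(fd, FITRIM, &range);
 *
 * On return, range.len is rewritten with the number of bytes actually
 * trimmed (cpc.trimmed above).
 */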

static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	if (curseg->next_blkoff < sbi->blocks_per_seg)
		return true;
	return false;
}

static int __get_segment_type_2(struct page *page, enum page_type p_type)
{
	if (p_type == DATA)
		return CURSEG_HOT_DATA;
	else
		return CURSEG_HOT_NODE;
}

static int __get_segment_type_4(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else
			return CURSEG_COLD_DATA;
	} else {
		if (IS_DNODE(page) && is_cold_node(page))
			return CURSEG_WARM_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type_6(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else if (is_cold_data(page) || file_is_cold(inode))
			return CURSEG_COLD_DATA;
		else
			return CURSEG_WARM_DATA;
	} else {
		if (IS_DNODE(page))
			return is_cold_node(page) ? CURSEG_WARM_NODE :
						CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type(struct page *page, enum page_type p_type)
{
	switch (F2FS_P_SB(page)->active_logs) {
	case 2:
		return __get_segment_type_2(page, p_type);
	case 4:
		return __get_segment_type_4(page, p_type);
	}
	/* NR_CURSEG_TYPE(6) logs by default */
	f2fs_bug_on(F2FS_P_SB(page),
		F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE);
	return __get_segment_type_6(page, p_type);
}
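
/*
 * With the default six logs, __get_segment_type_6() boils down to:
 *
 *   DATA: dir inode         -> HOT_DATA
 *         cold data/file    -> COLD_DATA
 *         everything else   -> WARM_DATA
 *   NODE: direct node, cold -> WARM_NODE
 *         direct node       -> HOT_NODE
 *         indirect node     -> COLD_NODE
 */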

void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
		block_t old_blkaddr, block_t *new_blkaddr,
		struct f2fs_summary *sum, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	bool direct_io = (type == CURSEG_DIRECT_IO);

	type = direct_io ? CURSEG_WARM_DATA : type;

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	/* direct_io'ed data is aligned to the segment for better performance */
	if (direct_io && curseg->next_blkoff)
		__allocate_new_segments(sbi, type);

	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/*
	 * __add_sum_entry must be called with curseg_mutex held
	 * because this function updates a summary entry in the
	 * current summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	__refresh_next_blkoff(sbi, curseg);

	stat_inc_block_count(sbi, curseg);

	if (!__has_curseg_space(sbi, type))
		sit_i->s_ops->allocate_segment(sbi, type, false);
	/*
	 * SIT information should be updated before segment allocation,
	 * since SSR needs latest valid block information.
	 */
	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

	mutex_unlock(&sit_i->sentry_lock);

	if (page && IS_NODESEG(type))
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

	mutex_unlock(&curseg->curseg_mutex);
}

static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
	int type = __get_segment_type(fio->page, fio->type);

	allocate_data_block(fio->sbi, fio->page, fio->blk_addr,
					&fio->blk_addr, sum, type);

	/* writeout dirty page into bdev */
	f2fs_submit_page_mbio(fio);
}

void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.rw = WRITE_SYNC | REQ_META | REQ_PRIO,
		.blk_addr = page->index,
		.page = page,
		.encrypted_page = NULL,
	};

	set_page_writeback(page);
	f2fs_submit_page_mbio(&fio);
}

void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
{
	struct f2fs_summary sum;

	set_summary(&sum, nid, 0, 0);
	do_write_page(&sum, fio);
}

void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct f2fs_summary sum;
	struct node_info ni;

	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	do_write_page(&sum, fio);
	dn->data_blkaddr = fio->blk_addr;
}

void rewrite_data_page(struct f2fs_io_info *fio)
{
	stat_inc_inplace_blocks(fio->sbi);
	f2fs_submit_page_mbio(fio);
}

static void __f2fs_replace_block(struct f2fs_sb_info *sbi,
				struct f2fs_summary *sum,
				block_t old_blkaddr, block_t new_blkaddr,
				bool recover_curseg)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	struct seg_entry *se;
	int type;
	unsigned short old_blkoff;

	segno = GET_SEGNO(sbi, new_blkaddr);
	se = get_seg_entry(sbi, segno);
	type = se->type;

	if (!recover_curseg) {
		/* for recovery flow */
		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
			if (old_blkaddr == NULL_ADDR)
				type = CURSEG_COLD_DATA;
			else
				type = CURSEG_WARM_DATA;
		}
	} else {
		if (!IS_CURSEG(sbi, segno))
			type = CURSEG_WARM_DATA;
	}

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	old_cursegno = curseg->segno;
	old_blkoff = curseg->next_blkoff;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}

	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
	__add_sum_entry(sbi, type, sum);

	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
	locate_dirty_segment(sbi, old_cursegno);

	if (recover_curseg) {
		if (old_cursegno != curseg->segno) {
			curseg->next_segno = old_cursegno;
			change_curseg(sbi, type, true);
		}
		curseg->next_blkoff = old_blkoff;
	}

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
				block_t old_addr, block_t new_addr,
				unsigned char version, bool recover_curseg)
{
	struct f2fs_summary sum;

	set_summary(&sum, dn->nid, dn->ofs_in_node, version);

	__f2fs_replace_block(sbi, &sum, old_addr, new_addr, recover_curseg);

	dn->data_blkaddr = new_addr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

static inline bool is_merged_page(struct f2fs_sb_info *sbi,
					struct page *page, enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	struct bio_vec *bvec;
	struct page *target;
	int i;

	down_read(&io->io_rwsem);
	if (!io->bio) {
		up_read(&io->io_rwsem);
		return false;
	}

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping) {
			target = bvec->bv_page;
		} else {
			struct f2fs_crypto_ctx *ctx;

			/* encrypted page */
			ctx = (struct f2fs_crypto_ctx *)page_private(
								bvec->bv_page);
			target = ctx->w.control_page;
		}

		if (page == target) {
			up_read(&io->io_rwsem);
			return true;
		}
	}

	up_read(&io->io_rwsem);
	return false;
}

void f2fs_wait_on_page_writeback(struct page *page,
				enum page_type type)
{
	if (PageWriteback(page)) {
		struct f2fs_sb_info *sbi = F2FS_P_SB(page);

		if (is_merged_page(sbi, page, type))
			f2fs_submit_merged_bio(sbi, type, WRITE);
		wait_on_page_writeback(page);
	}
}
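
/*
 * Why is_merged_page() matters here: the page may still sit in a
 * merged bio that f2fs has built but not yet submitted, in which case
 * PageWriteback would never clear on its own.  Kicking
 * f2fs_submit_merged_bio() first guarantees wait_on_page_writeback()
 * cannot stall on an unsubmitted bio.
 */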

static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *seg_i;
	unsigned char *kaddr;
	struct page *page;
	block_t start;
	int i, j, offset;

	start = start_sum_block(sbi);

	page = get_meta_page(sbi, start++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: restore nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);

	/* Step 2: restore sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
						SUM_JOURNAL_SIZE);
	offset = 2 * SUM_JOURNAL_SIZE;

	/* Step 3: restore summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		unsigned int segno;

		seg_i = CURSEG_I(sbi, i);
		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
		seg_i->next_segno = segno;
		reset_curseg(sbi, i, 0);
		seg_i->alloc_type = ckpt->alloc_type[i];
		seg_i->next_blkoff = blk_off;

		if (seg_i->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;
			s = (struct f2fs_summary *)(kaddr + offset);
			seg_i->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
						SUM_FOOTER_SIZE)
				continue;

			f2fs_put_page(page, 1);
			page = NULL;

			page = get_meta_page(sbi, start++);
			kaddr = (unsigned char *)page_address(page);
			offset = 0;
		}
	}
	f2fs_put_page(page, 1);
	return 0;
}

static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum;
	struct curseg_info *curseg;
	struct page *new;
	unsigned short blk_off;
	unsigned int segno = 0;
	block_t blk_addr = 0;

	/* get segment number and block addr */
	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
							CURSEG_HOT_DATA]);
		if (__exist_node_summaries(sbi))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
							CURSEG_HOT_NODE]);
		if (__exist_node_summaries(sbi))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLOCK(sbi, segno);
	}

	new = get_meta_page(sbi, blk_addr);
	sum = (struct f2fs_summary_block *)page_address(new);

	if (IS_NODESEG(type)) {
		if (__exist_node_summaries(sbi)) {
			struct f2fs_summary *ns = &sum->entries[0];
			int i;
			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
				ns->version = 0;
				ns->ofs_in_node = 0;
			}
		} else {
			int err;

			err = restore_node_summary(sbi, segno, sum);
			if (err) {
				f2fs_put_page(new, 1);
				return err;
			}
		}
	}

	/* set uncompleted segment to curseg */
	curseg = CURSEG_I(sbi, type);
	mutex_lock(&curseg->curseg_mutex);
	memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 0);
	curseg->alloc_type = ckpt->alloc_type[type];
	curseg->next_blkoff = blk_off;
	mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(new, 1);
	return 0;
}

static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;
	int err;

	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
		int npages = npages_for_summary_flush(sbi, true);

		if (npages >= 2)
			ra_meta_pages(sbi, start_sum_block(sbi), npages,
								META_CP);

		/* restore for compacted data summary */
		if (read_compacted_summaries(sbi))
			return -EINVAL;
		type = CURSEG_HOT_NODE;
	}

	if (__exist_node_summaries(sbi))
		ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
					NR_CURSEG_TYPE - type, META_CP);

	for (; type <= CURSEG_COLD_NODE; type++) {
		err = read_normal_summaries(sbi, type);
		if (err)
			return err;
	}

	return 0;
}

static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
						SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blkoff;
		seg_i = CURSEG_I(sbi, i);
		if (sbi->ckpt->alloc_type[i] == SSR)
			blkoff = sbi->blocks_per_seg;
		else
			blkoff = curseg_blkoff(sbi, i);

		for (j = 0; j < blkoff; j++) {
			if (!page) {
				page = grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;

			if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
							SUM_FOOTER_SIZE)
				continue;

			set_page_dirty(page);
			f2fs_put_page(page, 1);
			page = NULL;
		}
	}
	if (page) {
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;
	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

	for (i = type; i < end; i++) {
		struct curseg_info *sum = CURSEG_I(sbi, i);
		mutex_lock(&sum->curseg_mutex);
		write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
		mutex_unlock(&sum->curseg_mutex);
	}
}

void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}

void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}

int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
					unsigned int val, int alloc)
{
	int i;

	if (type == NAT_JOURNAL) {
		for (i = 0; i < nats_in_cursum(sum); i++) {
			if (le32_to_cpu(nid_in_journal(sum, i)) == val)
				return i;
		}
		if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
			return update_nats_in_cursum(sum, 1);
	} else if (type == SIT_JOURNAL) {
		for (i = 0; i < sits_in_cursum(sum); i++)
			if (le32_to_cpu(segno_in_journal(sum, i)) == val)
				return i;
		if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
			return update_sits_in_cursum(sum, 1);
	}
	return -1;
}

static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	return get_meta_page(sbi, current_sit_addr(sbi, segno));
}

static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
					unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct page *src_page, *dst_page;
	pgoff_t src_off, dst_off;
	void *src_addr, *dst_addr;

	src_off = current_sit_addr(sbi, start);
	dst_off = next_sit_addr(sbi, src_off);

	/* get current sit block page without lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);

	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_sit(sit_i, start);

	return dst_page;
}

static struct sit_entry_set *grab_sit_entry_set(void)
{
	struct sit_entry_set *ses =
			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_ATOMIC);

	ses->entry_cnt = 0;
	INIT_LIST_HEAD(&ses->set_list);
	return ses;
}

static void release_sit_entry_set(struct sit_entry_set *ses)
{
	list_del(&ses->set_list);
	kmem_cache_free(sit_entry_set_slab, ses);
}

static void adjust_sit_entry_set(struct sit_entry_set *ses,
						struct list_head *head)
{
	struct sit_entry_set *next = ses;

	if (list_is_last(&ses->set_list, head))
		return;

	list_for_each_entry_continue(next, head, set_list)
		if (ses->entry_cnt <= next->entry_cnt)
			break;

	list_move_tail(&ses->set_list, &next->set_list);
}

static void add_sit_entry(unsigned int segno, struct list_head *head)
{
	struct sit_entry_set *ses;
	unsigned int start_segno = START_SEGNO(segno);

	list_for_each_entry(ses, head, set_list) {
		if (ses->start_segno == start_segno) {
			ses->entry_cnt++;
			adjust_sit_entry_set(ses, head);
			return;
		}
	}

	ses = grab_sit_entry_set();

	ses->start_segno = start_segno;
	ses->entry_cnt++;
	list_add(&ses->set_list, head);
}

static void add_sits_in_set(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	struct list_head *set_list = &sm_info->sit_entry_set;
	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
	unsigned int segno;

	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
		add_sit_entry(segno, set_list);
}

static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
		unsigned int segno;
		bool dirtied;

		segno = le32_to_cpu(segno_in_journal(sum, i));
		dirtied = __mark_sit_entry_dirty(sbi, segno);

		if (!dirtied)
			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
	}
	update_sits_in_cursum(sum, -sits_in_cursum(sum));
}

/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct sit_entry_set *ses, *tmp;
	struct list_head *head = &SM_I(sbi)->sit_entry_set;
	bool to_journal = true;
	struct seg_entry *se;

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	if (!sit_i->dirty_sentries)
		goto out;

	/*
	 * add and account the sit entries of the dirty bitmap in the sit
	 * entry set temporarily
	 */
	add_sits_in_set(sbi);

	/*
	 * if there is not enough space in the journal to store dirty sit
	 * entries, remove all entries from the journal and add and account
	 * them in the sit entry set.
	 */
	if (!__has_cursum_space(sum, sit_i->dirty_sentries, SIT_JOURNAL))
		remove_sits_in_journal(sbi);

	/*
	 * there are two steps to flush sit entries:
	 * #1, flush sit entries to journal in current cold data summary block.
	 * #2, flush sit entries to sit page.
	 */
	list_for_each_entry_safe(ses, tmp, head, set_list) {
		struct page *page = NULL;
		struct f2fs_sit_block *raw_sit = NULL;
		unsigned int start_segno = ses->start_segno;
		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
						(unsigned long)MAIN_SEGS(sbi));
		unsigned int segno = start_segno;

		if (to_journal &&
			!__has_cursum_space(sum, ses->entry_cnt, SIT_JOURNAL))
			to_journal = false;

		if (!to_journal) {
			page = get_next_sit_page(sbi, start_segno);
			raw_sit = page_address(page);
		}

		/* flush dirty sit entries in region of current sit set */
		for_each_set_bit_from(segno, bitmap, end) {
			int offset, sit_offset;

			se = get_seg_entry(sbi, segno);

			/* add discard candidates */
			if (cpc->reason != CP_DISCARD) {
				cpc->trim_start = segno;
				add_discard_addrs(sbi, cpc);
			}

			if (to_journal) {
				offset = lookup_journal_in_cursum(sum,
							SIT_JOURNAL, segno, 1);
				f2fs_bug_on(sbi, offset < 0);
				segno_in_journal(sum, offset) =
							cpu_to_le32(segno);
				seg_info_to_raw_sit(se,
						&sit_in_journal(sum, offset));
			} else {
				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
				seg_info_to_raw_sit(se,
						&raw_sit->entries[sit_offset]);
			}

			__clear_bit(segno, bitmap);
			sit_i->dirty_sentries--;
			ses->entry_cnt--;
		}

		if (!to_journal)
			f2fs_put_page(page, 1);

		f2fs_bug_on(sbi, ses->entry_cnt);
		release_sit_entry_set(ses);
	}

	f2fs_bug_on(sbi, !list_empty(head));
	f2fs_bug_on(sbi, sit_i->dirty_sentries);
out:
	if (cpc->reason == CP_DISCARD) {
		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
			add_discard_addrs(sbi, cpc);
	}
	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);

	set_prefree_as_free_segments(sbi);
}
1921 
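/*
 * Allocate and initialize the in-memory SIT: per-segment entries, the
 * dirty sentries bitmap, optional per-section entries, and a private
 * copy of the SIT bitmap from the current checkpoint pack.
 */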
1922 static int build_sit_info(struct f2fs_sb_info *sbi)
1923 {
1924 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1925 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1926 	struct sit_info *sit_i;
1927 	unsigned int sit_segs, start;
1928 	char *src_bitmap, *dst_bitmap;
1929 	unsigned int bitmap_size;
1930 
1931 	/* allocate memory for SIT information */
1932 	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
1933 	if (!sit_i)
1934 		return -ENOMEM;
1935 
1936 	SM_I(sbi)->sit_info = sit_i;
1937 
1938 	sit_i->sentries = vzalloc(MAIN_SEGS(sbi) * sizeof(struct seg_entry));
1939 	if (!sit_i->sentries)
1940 		return -ENOMEM;
1941 
1942 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
1943 	sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
1944 	if (!sit_i->dirty_sentries_bitmap)
1945 		return -ENOMEM;
1946 
1947 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
1948 		sit_i->sentries[start].cur_valid_map
1949 			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1950 		sit_i->sentries[start].ckpt_valid_map
1951 			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1952 		sit_i->sentries[start].discard_map
1953 			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1954 		if (!sit_i->sentries[start].cur_valid_map ||
1955 				!sit_i->sentries[start].ckpt_valid_map ||
1956 				!sit_i->sentries[start].discard_map)
1957 			return -ENOMEM;
1958 	}
1959 
1960 	sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1961 	if (!sit_i->tmp_map)
1962 		return -ENOMEM;
1963 
1964 	if (sbi->segs_per_sec > 1) {
1965 		sit_i->sec_entries = vzalloc(MAIN_SECS(sbi) *
1966 					sizeof(struct sec_entry));
1967 		if (!sit_i->sec_entries)
1968 			return -ENOMEM;
1969 	}
1970 
1971 	/* get information related to SIT */
1972 	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
1973 
1974 	/* set up the SIT bitmap from the checkpoint pack */
1975 	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
1976 	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
1977 
1978 	dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
1979 	if (!dst_bitmap)
1980 		return -ENOMEM;
1981 
1982 	/* init SIT information */
1983 	sit_i->s_ops = &default_salloc_ops;
1984 
1985 	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
1986 	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
1987 	sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
1988 	sit_i->sit_bitmap = dst_bitmap;
1989 	sit_i->bitmap_size = bitmap_size;
1990 	sit_i->dirty_sentries = 0;
1991 	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
1992 	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
1993 	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
1994 	mutex_init(&sit_i->sentry_lock);
1995 	return 0;
1996 }
1997 
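/*
 * Allocate the free segment and section bitmaps.  Both start fully
 * set (everything in-use); init_free_segmap() clears the bits of
 * truly free segments once the SIT entries have been loaded.
 */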
1998 static int build_free_segmap(struct f2fs_sb_info *sbi)
1999 {
2000 	struct free_segmap_info *free_i;
2001 	unsigned int bitmap_size, sec_bitmap_size;
2002 
2003 	/* allocate memory for free segmap information */
2004 	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
2005 	if (!free_i)
2006 		return -ENOMEM;
2007 
2008 	SM_I(sbi)->free_info = free_i;
2009 
2010 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
2011 	free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
2012 	if (!free_i->free_segmap)
2013 		return -ENOMEM;
2014 
2015 	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
2016 	free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
2017 	if (!free_i->free_secmap)
2018 		return -ENOMEM;
2019 
2020 	/* set all segments as dirty temporarily */
2021 	/* temporarily mark all segments/sections as in-use */
2022 	memset(free_i->free_secmap, 0xff, sec_bitmap_size);
2023 
2024 	/* init free segmap information */
2025 	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
2026 	free_i->free_segments = 0;
2027 	free_i->free_sections = 0;
2028 	spin_lock_init(&free_i->segmap_lock);
2029 	return 0;
2030 }
2031 
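/*
 * Allocate the array of current segments, one per log type, and
 * restore their summary blocks from the checkpoint.
 */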
2032 static int build_curseg(struct f2fs_sb_info *sbi)
2033 {
2034 	struct curseg_info *array;
2035 	int i;
2036 
2037 	array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
2038 	if (!array)
2039 		return -ENOMEM;
2040 
2041 	SM_I(sbi)->curseg_array = array;
2042 
2043 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
2044 		mutex_init(&array[i].curseg_mutex);
2045 		array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
2046 		if (!array[i].sum_blk)
2047 			return -ENOMEM;
2048 		array[i].segno = NULL_SEGNO;
2049 		array[i].next_blkoff = 0;
2050 	}
2051 	return restore_curseg_summaries(sbi);
2052 }
2053 
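/*
 * Load the on-disk SIT blocks into the in-memory seg_entries,
 * preferring any newer copies still cached in the SIT journal, and
 * build the initial discard map and per-section block counts.
 */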
2054 static void build_sit_entries(struct f2fs_sb_info *sbi)
2055 {
2056 	struct sit_info *sit_i = SIT_I(sbi);
2057 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2058 	struct f2fs_summary_block *sum = curseg->sum_blk;
2059 	int sit_blk_cnt = SIT_BLK_CNT(sbi);
2060 	unsigned int i, start, end;
2061 	unsigned int readed, start_blk = 0;
2062 	int nrpages = MAX_BIO_BLOCKS(sbi);
2063 
2064 	do {
2065 		readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT);
2066 
2067 		start = start_blk * sit_i->sents_per_block;
2068 		end = (start_blk + readed) * sit_i->sents_per_block;
2069 
2070 		for (; start < end && start < MAIN_SEGS(sbi); start++) {
2071 			struct seg_entry *se = &sit_i->sentries[start];
2072 			struct f2fs_sit_block *sit_blk;
2073 			struct f2fs_sit_entry sit;
2074 			struct page *page;
2075 
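			/* prefer the up-to-date copy cached in the SIT journal, if any */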
2076 			mutex_lock(&curseg->curseg_mutex);
2077 			for (i = 0; i < sits_in_cursum(sum); i++) {
2078 				if (le32_to_cpu(segno_in_journal(sum, i))
2079 								== start) {
2080 					sit = sit_in_journal(sum, i);
2081 					mutex_unlock(&curseg->curseg_mutex);
2082 					goto got_it;
2083 				}
2084 			}
2085 			mutex_unlock(&curseg->curseg_mutex);
2086 
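			/* not found in the journal: read the entry from its SIT block */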
2087 			page = get_current_sit_page(sbi, start);
2088 			sit_blk = (struct f2fs_sit_block *)page_address(page);
2089 			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
2090 			f2fs_put_page(page, 1);
2091 got_it:
2092 			check_block_count(sbi, start, &sit);
2093 			seg_info_from_raw_sit(se, &sit);
2094 
2095 			/* build the discard map only once */
2096 			memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
2097 			sbi->discard_blks += sbi->blocks_per_seg - se->valid_blocks;
2098 
2099 			if (sbi->segs_per_sec > 1) {
2100 				struct sec_entry *e = get_sec_entry(sbi, start);
2101 				e->valid_blocks += se->valid_blocks;
2102 			}
2103 		}
2104 		start_blk += readed;
2105 	} while (start_blk < sit_blk_cnt);
2106 }
2107 
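/*
 * Mark every segment that has no valid blocks as free, then re-mark
 * the active current segments as in-use.
 */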
2108 static void init_free_segmap(struct f2fs_sb_info *sbi)
2109 {
2110 	unsigned int start;
2111 	int type;
2112 
2113 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
2114 		struct seg_entry *sentry = get_seg_entry(sbi, start);
2115 		if (!sentry->valid_blocks)
2116 			__set_free(sbi, start);
2117 	}
2118 
2119 	/* mark the current segments as in-use */
2120 	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
2121 		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
2122 		__set_test_and_inuse(sbi, curseg_t->segno);
2123 	}
2124 }
2125 
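/*
 * Scan all in-use segments and tag the partially valid ones (neither
 * empty nor completely full) as DIRTY in the dirty seglist.
 */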
2126 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
2127 {
2128 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2129 	struct free_segmap_info *free_i = FREE_I(sbi);
2130 	unsigned int segno = 0, offset = 0;
2131 	unsigned short valid_blocks;
2132 
2133 	while (1) {
2134 		/* find dirty segment based on free segmap */
2135 		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
2136 		if (segno >= MAIN_SEGS(sbi))
2137 			break;
2138 		offset = segno + 1;
2139 		valid_blocks = get_valid_blocks(sbi, segno, 0);
2140 		if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
2141 			continue;
2142 		if (valid_blocks > sbi->blocks_per_seg) {
2143 			f2fs_bug_on(sbi, 1);
2144 			continue;
2145 		}
2146 		mutex_lock(&dirty_i->seglist_lock);
2147 		__locate_dirty_segment(sbi, segno, DIRTY);
2148 		mutex_unlock(&dirty_i->seglist_lock);
2149 	}
2150 }
2151 
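/* allocate the bitmap that tracks sections selected as GC victims */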
2152 static int init_victim_secmap(struct f2fs_sb_info *sbi)
2153 {
2154 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2155 	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
2156 
2157 	dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
2158 	if (!dirty_i->victim_secmap)
2159 		return -ENOMEM;
2160 	return 0;
2161 }
2162 
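/*
 * Allocate the per-type dirty segment bitmaps, populate the DIRTY
 * type from the loaded SIT data and set up the victim section bitmap.
 */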
2163 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
2164 {
2165 	struct dirty_seglist_info *dirty_i;
2166 	unsigned int bitmap_size, i;
2167 
2168 	/* allocate memory for dirty segments list information */
2169 	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
2170 	if (!dirty_i)
2171 		return -ENOMEM;
2172 
2173 	SM_I(sbi)->dirty_info = dirty_i;
2174 	mutex_init(&dirty_i->seglist_lock);
2175 
2176 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
2177 
2178 	for (i = 0; i < NR_DIRTY_TYPE; i++) {
2179 		dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
2180 		if (!dirty_i->dirty_segmap[i])
2181 			return -ENOMEM;
2182 	}
2183 
2184 	init_dirty_segmap(sbi);
2185 	return init_victim_secmap(sbi);
2186 }
2187 
2188 /*
2189  * Update min, max modified time for cost-benefit GC algorithm
2190  */
2191 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
2192 {
2193 	struct sit_info *sit_i = SIT_I(sbi);
2194 	unsigned int segno;
2195 
2196 	mutex_lock(&sit_i->sentry_lock);
2197 
2198 	sit_i->min_mtime = LLONG_MAX;
2199 
2200 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
2201 		unsigned int i;
2202 		unsigned long long mtime = 0;
2203 
2204 		for (i = 0; i < sbi->segs_per_sec; i++)
2205 			mtime += get_seg_entry(sbi, segno + i)->mtime;
2206 
2207 		mtime = div_u64(mtime, sbi->segs_per_sec);
2208 
2209 		if (sit_i->min_mtime > mtime)
2210 			sit_i->min_mtime = mtime;
2211 	}
2212 	sit_i->max_mtime = get_mtime(sbi);
2213 	mutex_unlock(&sit_i->sentry_lock);
2214 }
2215 
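/*
 * Build the whole segment manager from the raw superblock and the
 * checkpoint: SIT info, free/dirty segmaps, current segments and the
 * min/max mtime bounds used by GC.  Flush-merge control is set up
 * first when the FLUSH_MERGE option is enabled on a writable mount.
 */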
2216 int build_segment_manager(struct f2fs_sb_info *sbi)
2217 {
2218 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2219 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2220 	struct f2fs_sm_info *sm_info;
2221 	int err;
2222 
2223 	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
2224 	if (!sm_info)
2225 		return -ENOMEM;
2226 
2227 	/* init sm info */
2228 	sbi->sm_info = sm_info;
2229 	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
2230 	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
2231 	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
2232 	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
2233 	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
2234 	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
2235 	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
2236 	sm_info->rec_prefree_segments = sm_info->main_segments *
2237 					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
2238 	sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
2239 	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
2240 	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
2241 
2242 	INIT_LIST_HEAD(&sm_info->discard_list);
2243 	sm_info->nr_discards = 0;
2244 	sm_info->max_discards = 0;
2245 
2246 	sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
2247 
2248 	INIT_LIST_HEAD(&sm_info->sit_entry_set);
2249 
2250 	if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
2251 		err = create_flush_cmd_control(sbi);
2252 		if (err)
2253 			return err;
2254 	}
2255 
2256 	err = build_sit_info(sbi);
2257 	if (err)
2258 		return err;
2259 	err = build_free_segmap(sbi);
2260 	if (err)
2261 		return err;
2262 	err = build_curseg(sbi);
2263 	if (err)
2264 		return err;
2265 
2266 	/* reinit free segmap based on SIT */
2267 	build_sit_entries(sbi);
2268 
2269 	init_free_segmap(sbi);
2270 	err = build_dirty_segmap(sbi);
2271 	if (err)
2272 		return err;
2273 
2274 	init_min_max_mtime(sbi);
2275 	return 0;
2276 }
2277 
2278 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
2279 		enum dirty_type dirty_type)
2280 {
2281 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2282 
2283 	mutex_lock(&dirty_i->seglist_lock);
2284 	kfree(dirty_i->dirty_segmap[dirty_type]);
2285 	dirty_i->nr_dirty[dirty_type] = 0;
2286 	mutex_unlock(&dirty_i->seglist_lock);
2287 }
2288 
2289 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
2290 {
2291 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2292 	kfree(dirty_i->victim_secmap);
2293 }
2294 
2295 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
2296 {
2297 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2298 	int i;
2299 
2300 	if (!dirty_i)
2301 		return;
2302 
2303 	/* discard pre-free/dirty segments list */
2304 	for (i = 0; i < NR_DIRTY_TYPE; i++)
2305 		discard_dirty_segmap(sbi, i);
2306 
2307 	destroy_victim_secmap(sbi);
2308 	SM_I(sbi)->dirty_info = NULL;
2309 	kfree(dirty_i);
2310 }
2311 
2312 static void destroy_curseg(struct f2fs_sb_info *sbi)
2313 {
2314 	struct curseg_info *array = SM_I(sbi)->curseg_array;
2315 	int i;
2316 
2317 	if (!array)
2318 		return;
2319 	SM_I(sbi)->curseg_array = NULL;
2320 	for (i = 0; i < NR_CURSEG_TYPE; i++)
2321 		kfree(array[i].sum_blk);
2322 	kfree(array);
2323 }
2324 
2325 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
2326 {
2327 	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
2328 	if (!free_i)
2329 		return;
2330 	SM_I(sbi)->free_info = NULL;
2331 	kfree(free_i->free_segmap);
2332 	kfree(free_i->free_secmap);
2333 	kfree(free_i);
2334 }
2335 
2336 static void destroy_sit_info(struct f2fs_sb_info *sbi)
2337 {
2338 	struct sit_info *sit_i = SIT_I(sbi);
2339 	unsigned int start;
2340 
2341 	if (!sit_i)
2342 		return;
2343 
2344 	if (sit_i->sentries) {
2345 		for (start = 0; start < MAIN_SEGS(sbi); start++) {
2346 			kfree(sit_i->sentries[start].cur_valid_map);
2347 			kfree(sit_i->sentries[start].ckpt_valid_map);
2348 			kfree(sit_i->sentries[start].discard_map);
2349 		}
2350 	}
2351 	kfree(sit_i->tmp_map);
2352 
2353 	vfree(sit_i->sentries);
2354 	vfree(sit_i->sec_entries);
2355 	kfree(sit_i->dirty_sentries_bitmap);
2356 
2357 	SM_I(sbi)->sit_info = NULL;
2358 	kfree(sit_i->sit_bitmap);
2359 	kfree(sit_i);
2360 }
2361 
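/*
 * Tear down everything built by build_segment_manager(), in reverse
 * order of construction.
 */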
2362 void destroy_segment_manager(struct f2fs_sb_info *sbi)
2363 {
2364 	struct f2fs_sm_info *sm_info = SM_I(sbi);
2365 
2366 	if (!sm_info)
2367 		return;
2368 	destroy_flush_cmd_control(sbi);
2369 	destroy_dirty_segmap(sbi);
2370 	destroy_curseg(sbi);
2371 	destroy_free_segmap(sbi);
2372 	destroy_sit_info(sbi);
2373 	sbi->sm_info = NULL;
2374 	kfree(sm_info);
2375 }
2376 
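/*
 * Create the slab caches for discard entries, sit entry sets and
 * in-memory page entries; on failure, destroy whatever was already
 * created so a partial init does not leak caches.
 */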
2377 int __init create_segment_manager_caches(void)
2378 {
2379 	discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
2380 			sizeof(struct discard_entry));
2381 	if (!discard_entry_slab)
2382 		goto fail;
2383 
2384 	sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
2385 			sizeof(struct sit_entry_set));
2386 	if (!sit_entry_set_slab)
2387 		goto destroy_discard_entry;
2388 
2389 	inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
2390 			sizeof(struct inmem_pages));
2391 	if (!inmem_entry_slab)
2392 		goto destroy_sit_entry_set;
2393 	return 0;
2394 
2395 destroy_sit_entry_set:
2396 	kmem_cache_destroy(sit_entry_set_slab);
2397 destroy_discard_entry:
2398 	kmem_cache_destroy(discard_entry_slab);
2399 fail:
2400 	return -ENOMEM;
2401 }
2402 
2403 void destroy_segment_manager_caches(void)
2404 {
2405 	kmem_cache_destroy(sit_entry_set_slab);
2406 	kmem_cache_destroy(discard_entry_slab);
2407 	kmem_cache_destroy(inmem_entry_slab);
2408 }
2409