xref: /openbmc/linux/fs/f2fs/segment.c (revision 4ddb1a4d4dc20642073b7d92400a67b67601fe6f)
10a8165d7SJaegeuk Kim /*
2351df4b2SJaegeuk Kim  * fs/f2fs/segment.c
3351df4b2SJaegeuk Kim  *
4351df4b2SJaegeuk Kim  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5351df4b2SJaegeuk Kim  *             http://www.samsung.com/
6351df4b2SJaegeuk Kim  *
7351df4b2SJaegeuk Kim  * This program is free software; you can redistribute it and/or modify
8351df4b2SJaegeuk Kim  * it under the terms of the GNU General Public License version 2 as
9351df4b2SJaegeuk Kim  * published by the Free Software Foundation.
10351df4b2SJaegeuk Kim  */
11351df4b2SJaegeuk Kim #include <linux/fs.h>
12351df4b2SJaegeuk Kim #include <linux/f2fs_fs.h>
13351df4b2SJaegeuk Kim #include <linux/bio.h>
14351df4b2SJaegeuk Kim #include <linux/blkdev.h>
15690e4a3eSGeert Uytterhoeven #include <linux/prefetch.h>
166b4afdd7SJaegeuk Kim #include <linux/kthread.h>
1774de593aSChao Yu #include <linux/swap.h>
1860b99b48SJaegeuk Kim #include <linux/timer.h>
19351df4b2SJaegeuk Kim 
20351df4b2SJaegeuk Kim #include "f2fs.h"
21351df4b2SJaegeuk Kim #include "segment.h"
22351df4b2SJaegeuk Kim #include "node.h"
239e4ded3fSJaegeuk Kim #include "trace.h"
246ec178daSNamjae Jeon #include <trace/events/f2fs.h>
25351df4b2SJaegeuk Kim 
269a7f143aSChangman Lee #define __reverse_ffz(x) __reverse_ffs(~(x))
279a7f143aSChangman Lee 
287fd9e544SJaegeuk Kim static struct kmem_cache *discard_entry_slab;
29b01a9201SJaegeuk Kim static struct kmem_cache *discard_cmd_slab;
30184a5cd2SChao Yu static struct kmem_cache *sit_entry_set_slab;
3188b88a66SJaegeuk Kim static struct kmem_cache *inmem_entry_slab;
327fd9e544SJaegeuk Kim 
33f96999c3SJaegeuk Kim static unsigned long __reverse_ulong(unsigned char *str)
34f96999c3SJaegeuk Kim {
35f96999c3SJaegeuk Kim 	unsigned long tmp = 0;
36f96999c3SJaegeuk Kim 	int shift = 24, idx = 0;
37f96999c3SJaegeuk Kim 
38f96999c3SJaegeuk Kim #if BITS_PER_LONG == 64
39f96999c3SJaegeuk Kim 	shift = 56;
40f96999c3SJaegeuk Kim #endif
41f96999c3SJaegeuk Kim 	while (shift >= 0) {
42f96999c3SJaegeuk Kim 		tmp |= (unsigned long)str[idx++] << shift;
43f96999c3SJaegeuk Kim 		shift -= BITS_PER_BYTE;
44f96999c3SJaegeuk Kim 	}
45f96999c3SJaegeuk Kim 	return tmp;
46f96999c3SJaegeuk Kim }
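
/*
 * Added note (editor's commentary, not from the original file):
 * __reverse_ulong() is effectively a big-endian load of one word.  On a
 * 64-bit build, str = {0x12, 0x34, 0x56, 0x78, 0x00, 0x00, 0x00, 0x00}
 * yields 0x1234567800000000, i.e. str[0] lands in the most significant
 * byte, matching the MSB-first bit numbering used by f2fs_set_bit().
 */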
47f96999c3SJaegeuk Kim 
489a7f143aSChangman Lee /*
499a7f143aSChangman Lee  * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
509a7f143aSChangman Lee  * MSB and LSB are reversed in a byte by f2fs_set_bit.
519a7f143aSChangman Lee  */
529a7f143aSChangman Lee static inline unsigned long __reverse_ffs(unsigned long word)
539a7f143aSChangman Lee {
549a7f143aSChangman Lee 	int num = 0;
559a7f143aSChangman Lee 
569a7f143aSChangman Lee #if BITS_PER_LONG == 64
57f96999c3SJaegeuk Kim 	if ((word & 0xffffffff00000000UL) == 0)
589a7f143aSChangman Lee 		num += 32;
59f96999c3SJaegeuk Kim 	else
609a7f143aSChangman Lee 		word >>= 32;
619a7f143aSChangman Lee #endif
62f96999c3SJaegeuk Kim 	if ((word & 0xffff0000) == 0)
639a7f143aSChangman Lee 		num += 16;
64f96999c3SJaegeuk Kim 	else
659a7f143aSChangman Lee 		word >>= 16;
66f96999c3SJaegeuk Kim 
67f96999c3SJaegeuk Kim 	if ((word & 0xff00) == 0)
689a7f143aSChangman Lee 		num += 8;
69f96999c3SJaegeuk Kim 	else
709a7f143aSChangman Lee 		word >>= 8;
71f96999c3SJaegeuk Kim 
729a7f143aSChangman Lee 	if ((word & 0xf0) == 0)
739a7f143aSChangman Lee 		num += 4;
749a7f143aSChangman Lee 	else
759a7f143aSChangman Lee 		word >>= 4;
76f96999c3SJaegeuk Kim 
779a7f143aSChangman Lee 	if ((word & 0xc) == 0)
789a7f143aSChangman Lee 		num += 2;
799a7f143aSChangman Lee 	else
809a7f143aSChangman Lee 		word >>= 2;
81f96999c3SJaegeuk Kim 
829a7f143aSChangman Lee 	if ((word & 0x2) == 0)
839a7f143aSChangman Lee 		num += 1;
849a7f143aSChangman Lee 	return num;
859a7f143aSChangman Lee }
869a7f143aSChangman Lee 
879a7f143aSChangman Lee /*
88e1c42045Sarter97  * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
899a7f143aSChangman Lee  * f2fs_set_bit reverses the bit order (MSB <-> LSB) within each byte.
90692223d1SFan Li  * @size must be a multiple of BITS_PER_LONG (i.e. whole unsigned longs).
919a7f143aSChangman Lee  * Example:
92f96999c3SJaegeuk Kim  *                             MSB <--> LSB
93f96999c3SJaegeuk Kim  *   f2fs_set_bit(0, bitmap) => 1000 0000
94f96999c3SJaegeuk Kim  *   f2fs_set_bit(7, bitmap) => 0000 0001
959a7f143aSChangman Lee  */
969a7f143aSChangman Lee static unsigned long __find_rev_next_bit(const unsigned long *addr,
979a7f143aSChangman Lee 			unsigned long size, unsigned long offset)
989a7f143aSChangman Lee {
999a7f143aSChangman Lee 	const unsigned long *p = addr + BIT_WORD(offset);
100692223d1SFan Li 	unsigned long result = size;
1019a7f143aSChangman Lee 	unsigned long tmp;
1029a7f143aSChangman Lee 
1039a7f143aSChangman Lee 	if (offset >= size)
1049a7f143aSChangman Lee 		return size;
1059a7f143aSChangman Lee 
106692223d1SFan Li 	size -= (offset & ~(BITS_PER_LONG - 1));
1079a7f143aSChangman Lee 	offset %= BITS_PER_LONG;
108692223d1SFan Li 
109692223d1SFan Li 	while (1) {
110692223d1SFan Li 		if (*p == 0)
111692223d1SFan Li 			goto pass;
1129a7f143aSChangman Lee 
113f96999c3SJaegeuk Kim 		tmp = __reverse_ulong((unsigned char *)p);
114692223d1SFan Li 
115f96999c3SJaegeuk Kim 		tmp &= ~0UL >> offset;
1169a7f143aSChangman Lee 		if (size < BITS_PER_LONG)
117692223d1SFan Li 			tmp &= (~0UL << (BITS_PER_LONG - size));
1189a7f143aSChangman Lee 		if (tmp)
119692223d1SFan Li 			goto found;
120692223d1SFan Li pass:
121692223d1SFan Li 		if (size <= BITS_PER_LONG)
122692223d1SFan Li 			break;
1239a7f143aSChangman Lee 		size -= BITS_PER_LONG;
124692223d1SFan Li 		offset = 0;
125f96999c3SJaegeuk Kim 		p++;
1269a7f143aSChangman Lee 	}
1279a7f143aSChangman Lee 	return result;
128692223d1SFan Li found:
129692223d1SFan Li 	return result - size + __reverse_ffs(tmp);
1309a7f143aSChangman Lee }
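
/*
 * Worked example (added for clarity, not in the original source), assuming a
 * 64-bit build and a zeroed 8-byte bitmap:
 *
 *   f2fs_set_bit(9, bitmap)             => byte 1 becomes 0100 0000
 *   __reverse_ulong(bitmap)             => 0x0040000000000000
 *   __reverse_ffs(0x0040000000000000)   => 9
 *   __find_rev_next_bit(bitmap, 64, 0)  => returns 9
 *   __find_rev_next_bit(bitmap, 64, 10) => returns 64 (== size, not found)
 */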
1319a7f143aSChangman Lee 
1329a7f143aSChangman Lee static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
1339a7f143aSChangman Lee 			unsigned long size, unsigned long offset)
1349a7f143aSChangman Lee {
1359a7f143aSChangman Lee 	const unsigned long *p = addr + BIT_WORD(offset);
13680609448SJaegeuk Kim 	unsigned long result = size;
1379a7f143aSChangman Lee 	unsigned long tmp;
1389a7f143aSChangman Lee 
1399a7f143aSChangman Lee 	if (offset >= size)
1409a7f143aSChangman Lee 		return size;
1419a7f143aSChangman Lee 
14280609448SJaegeuk Kim 	size -= (offset & ~(BITS_PER_LONG - 1));
1439a7f143aSChangman Lee 	offset %= BITS_PER_LONG;
14480609448SJaegeuk Kim 
14580609448SJaegeuk Kim 	while (1) {
14680609448SJaegeuk Kim 		if (*p == ~0UL)
14780609448SJaegeuk Kim 			goto pass;
1489a7f143aSChangman Lee 
149f96999c3SJaegeuk Kim 		tmp = __reverse_ulong((unsigned char *)p);
150f96999c3SJaegeuk Kim 
15180609448SJaegeuk Kim 		if (offset)
15280609448SJaegeuk Kim 			tmp |= ~0UL << (BITS_PER_LONG - offset);
1539a7f143aSChangman Lee 		if (size < BITS_PER_LONG)
15480609448SJaegeuk Kim 			tmp |= ~0UL >> size;
155f96999c3SJaegeuk Kim 		if (tmp != ~0UL)
15680609448SJaegeuk Kim 			goto found;
15780609448SJaegeuk Kim pass:
15880609448SJaegeuk Kim 		if (size <= BITS_PER_LONG)
15980609448SJaegeuk Kim 			break;
1609a7f143aSChangman Lee 		size -= BITS_PER_LONG;
16180609448SJaegeuk Kim 		offset = 0;
162f96999c3SJaegeuk Kim 		p++;
1639a7f143aSChangman Lee 	}
1649a7f143aSChangman Lee 	return result;
16580609448SJaegeuk Kim found:
16680609448SJaegeuk Kim 	return result - size + __reverse_ffz(tmp);
1679a7f143aSChangman Lee }
1689a7f143aSChangman Lee 
16988b88a66SJaegeuk Kim void register_inmem_page(struct inode *inode, struct page *page)
17088b88a66SJaegeuk Kim {
17188b88a66SJaegeuk Kim 	struct f2fs_inode_info *fi = F2FS_I(inode);
17288b88a66SJaegeuk Kim 	struct inmem_pages *new;
1739be32d72SJaegeuk Kim 
1749e4ded3fSJaegeuk Kim 	f2fs_trace_pid(page);
1750722b101SJaegeuk Kim 
176decd36b6SChao Yu 	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
177decd36b6SChao Yu 	SetPagePrivate(page);
178decd36b6SChao Yu 
17988b88a66SJaegeuk Kim 	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
18088b88a66SJaegeuk Kim 
18188b88a66SJaegeuk Kim 	/* add atomic page indices to the list */
18288b88a66SJaegeuk Kim 	new->page = page;
18388b88a66SJaegeuk Kim 	INIT_LIST_HEAD(&new->list);
184decd36b6SChao Yu 
18588b88a66SJaegeuk Kim 	/* increase reference count with clean state */
18688b88a66SJaegeuk Kim 	mutex_lock(&fi->inmem_lock);
18788b88a66SJaegeuk Kim 	get_page(page);
18888b88a66SJaegeuk Kim 	list_add_tail(&new->list, &fi->inmem_pages);
1898dcf2ff7SJaegeuk Kim 	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
19088b88a66SJaegeuk Kim 	mutex_unlock(&fi->inmem_lock);
1918ce67cb0SJaegeuk Kim 
1928ce67cb0SJaegeuk Kim 	trace_f2fs_register_inmem_page(page, INMEM);
19388b88a66SJaegeuk Kim }
19488b88a66SJaegeuk Kim 
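/*
 * Added summary (editor's note, not part of the original file):
 * __revoke_inmem_pages() is used in three ways by its callers:
 *  - drop=true,  recover=false: throw away not-yet-committed atomic pages
 *    (drop_inmem_pages and the failure path of commit_inmem_pages).
 *  - drop=false, recover=true:  undo pages already written during a failed
 *    commit by restoring cur->old_addr via f2fs_replace_block().
 *  - drop=false, recover=false: release bookkeeping after a successful commit.
 */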
19528bc106bSChao Yu static int __revoke_inmem_pages(struct inode *inode,
19628bc106bSChao Yu 				struct list_head *head, bool drop, bool recover)
19729b96b54SChao Yu {
19828bc106bSChao Yu 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
19929b96b54SChao Yu 	struct inmem_pages *cur, *tmp;
20028bc106bSChao Yu 	int err = 0;
20129b96b54SChao Yu 
20229b96b54SChao Yu 	list_for_each_entry_safe(cur, tmp, head, list) {
20328bc106bSChao Yu 		struct page *page = cur->page;
20429b96b54SChao Yu 
20528bc106bSChao Yu 		if (drop)
20628bc106bSChao Yu 			trace_f2fs_commit_inmem_page(page, INMEM_DROP);
20728bc106bSChao Yu 
20828bc106bSChao Yu 		lock_page(page);
20928bc106bSChao Yu 
21028bc106bSChao Yu 		if (recover) {
21128bc106bSChao Yu 			struct dnode_of_data dn;
21228bc106bSChao Yu 			struct node_info ni;
21328bc106bSChao Yu 
21428bc106bSChao Yu 			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
21528bc106bSChao Yu 
21628bc106bSChao Yu 			set_new_dnode(&dn, inode, NULL, NULL, 0);
21728bc106bSChao Yu 			if (get_dnode_of_data(&dn, page->index, LOOKUP_NODE)) {
21828bc106bSChao Yu 				err = -EAGAIN;
21928bc106bSChao Yu 				goto next;
22028bc106bSChao Yu 			}
22128bc106bSChao Yu 			get_node_info(sbi, dn.nid, &ni);
22228bc106bSChao Yu 			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
22328bc106bSChao Yu 					cur->old_addr, ni.version, true, true);
22428bc106bSChao Yu 			f2fs_put_dnode(&dn);
22528bc106bSChao Yu 		}
22628bc106bSChao Yu next:
22763c52d78SJaegeuk Kim 		/* we don't need to invalidate this in the successful case */
22863c52d78SJaegeuk Kim 		if (drop || recover)
22928bc106bSChao Yu 			ClearPageUptodate(page);
23028bc106bSChao Yu 		set_page_private(page, 0);
231c81ced05SChao Yu 		ClearPagePrivate(page);
23228bc106bSChao Yu 		f2fs_put_page(page, 1);
23329b96b54SChao Yu 
23429b96b54SChao Yu 		list_del(&cur->list);
23529b96b54SChao Yu 		kmem_cache_free(inmem_entry_slab, cur);
23629b96b54SChao Yu 		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
23729b96b54SChao Yu 	}
23828bc106bSChao Yu 	return err;
23929b96b54SChao Yu }
24029b96b54SChao Yu 
24129b96b54SChao Yu void drop_inmem_pages(struct inode *inode)
24229b96b54SChao Yu {
24329b96b54SChao Yu 	struct f2fs_inode_info *fi = F2FS_I(inode);
24429b96b54SChao Yu 
24529b96b54SChao Yu 	mutex_lock(&fi->inmem_lock);
24628bc106bSChao Yu 	__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
24729b96b54SChao Yu 	mutex_unlock(&fi->inmem_lock);
2485fe45743SChao Yu 
2495fe45743SChao Yu 	clear_inode_flag(inode, FI_ATOMIC_FILE);
2505fe45743SChao Yu 	stat_dec_atomic_write(inode);
25129b96b54SChao Yu }
25229b96b54SChao Yu 
2538c242db9SJaegeuk Kim void drop_inmem_page(struct inode *inode, struct page *page)
2548c242db9SJaegeuk Kim {
2558c242db9SJaegeuk Kim 	struct f2fs_inode_info *fi = F2FS_I(inode);
2568c242db9SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2578c242db9SJaegeuk Kim 	struct list_head *head = &fi->inmem_pages;
2588c242db9SJaegeuk Kim 	struct inmem_pages *cur = NULL;
2598c242db9SJaegeuk Kim 
2608c242db9SJaegeuk Kim 	f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));
2618c242db9SJaegeuk Kim 
2628c242db9SJaegeuk Kim 	mutex_lock(&fi->inmem_lock);
2638c242db9SJaegeuk Kim 	list_for_each_entry(cur, head, list) {
2648c242db9SJaegeuk Kim 		if (cur->page == page)
2658c242db9SJaegeuk Kim 			break;
2668c242db9SJaegeuk Kim 	}
2678c242db9SJaegeuk Kim 
2688c242db9SJaegeuk Kim 	f2fs_bug_on(sbi, !cur || cur->page != page);
2698c242db9SJaegeuk Kim 	list_del(&cur->list);
2708c242db9SJaegeuk Kim 	mutex_unlock(&fi->inmem_lock);
2718c242db9SJaegeuk Kim 
2728c242db9SJaegeuk Kim 	dec_page_count(sbi, F2FS_INMEM_PAGES);
2738c242db9SJaegeuk Kim 	kmem_cache_free(inmem_entry_slab, cur);
2748c242db9SJaegeuk Kim 
2758c242db9SJaegeuk Kim 	ClearPageUptodate(page);
2768c242db9SJaegeuk Kim 	set_page_private(page, 0);
2778c242db9SJaegeuk Kim 	ClearPagePrivate(page);
2788c242db9SJaegeuk Kim 	f2fs_put_page(page, 0);
2798c242db9SJaegeuk Kim 
2808c242db9SJaegeuk Kim 	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
2818c242db9SJaegeuk Kim }
2828c242db9SJaegeuk Kim 
28328bc106bSChao Yu static int __commit_inmem_pages(struct inode *inode,
28428bc106bSChao Yu 					struct list_head *revoke_list)
28588b88a66SJaegeuk Kim {
28688b88a66SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
28788b88a66SJaegeuk Kim 	struct f2fs_inode_info *fi = F2FS_I(inode);
28888b88a66SJaegeuk Kim 	struct inmem_pages *cur, *tmp;
28988b88a66SJaegeuk Kim 	struct f2fs_io_info fio = {
29005ca3632SJaegeuk Kim 		.sbi = sbi,
29188b88a66SJaegeuk Kim 		.type = DATA,
29204d328deSMike Christie 		.op = REQ_OP_WRITE,
29370fd7614SChristoph Hellwig 		.op_flags = REQ_SYNC | REQ_PRIO,
2944375a336SJaegeuk Kim 		.encrypted_page = NULL,
29588b88a66SJaegeuk Kim 	};
296942fd319SJaegeuk Kim 	pgoff_t last_idx = ULONG_MAX;
297edb27deeSJaegeuk Kim 	int err = 0;
29888b88a66SJaegeuk Kim 
29988b88a66SJaegeuk Kim 	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
30028bc106bSChao Yu 		struct page *page = cur->page;
30128bc106bSChao Yu 
30228bc106bSChao Yu 		lock_page(page);
30328bc106bSChao Yu 		if (page->mapping == inode->i_mapping) {
30428bc106bSChao Yu 			trace_f2fs_commit_inmem_page(page, INMEM);
30528bc106bSChao Yu 
30628bc106bSChao Yu 			set_page_dirty(page);
30728bc106bSChao Yu 			f2fs_wait_on_page_writeback(page, DATA, true);
308933439c8SChao Yu 			if (clear_page_dirty_for_io(page)) {
30988b88a66SJaegeuk Kim 				inode_dec_dirty_pages(inode);
310933439c8SChao Yu 				remove_dirty_inode(inode);
311933439c8SChao Yu 			}
31228bc106bSChao Yu 
31328bc106bSChao Yu 			fio.page = page;
314edb27deeSJaegeuk Kim 			err = do_write_data_page(&fio);
315edb27deeSJaegeuk Kim 			if (err) {
31628bc106bSChao Yu 				unlock_page(page);
317edb27deeSJaegeuk Kim 				break;
318edb27deeSJaegeuk Kim 			}
31928bc106bSChao Yu 
32028bc106bSChao Yu 			/* record old blkaddr for revoking */
32128bc106bSChao Yu 			cur->old_addr = fio.old_blkaddr;
322942fd319SJaegeuk Kim 			last_idx = page->index;
32388b88a66SJaegeuk Kim 		}
32428bc106bSChao Yu 		unlock_page(page);
32528bc106bSChao Yu 		list_move_tail(&cur->list, revoke_list);
32688b88a66SJaegeuk Kim 	}
32729b96b54SChao Yu 
328942fd319SJaegeuk Kim 	if (last_idx != ULONG_MAX)
329942fd319SJaegeuk Kim 		f2fs_submit_merged_bio_cond(sbi, inode, 0, last_idx,
330942fd319SJaegeuk Kim 							DATA, WRITE);
33128bc106bSChao Yu 
33228bc106bSChao Yu 	if (!err)
33328bc106bSChao Yu 		__revoke_inmem_pages(inode, revoke_list, false, false);
33428bc106bSChao Yu 
33529b96b54SChao Yu 	return err;
33629b96b54SChao Yu }
33729b96b54SChao Yu 
33829b96b54SChao Yu int commit_inmem_pages(struct inode *inode)
33929b96b54SChao Yu {
34029b96b54SChao Yu 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
34129b96b54SChao Yu 	struct f2fs_inode_info *fi = F2FS_I(inode);
34228bc106bSChao Yu 	struct list_head revoke_list;
34328bc106bSChao Yu 	int err;
34429b96b54SChao Yu 
34528bc106bSChao Yu 	INIT_LIST_HEAD(&revoke_list);
34629b96b54SChao Yu 	f2fs_balance_fs(sbi, true);
34729b96b54SChao Yu 	f2fs_lock_op(sbi);
34829b96b54SChao Yu 
3495fe45743SChao Yu 	set_inode_flag(inode, FI_ATOMIC_COMMIT);
3505fe45743SChao Yu 
35129b96b54SChao Yu 	mutex_lock(&fi->inmem_lock);
35228bc106bSChao Yu 	err = __commit_inmem_pages(inode, &revoke_list);
35328bc106bSChao Yu 	if (err) {
35428bc106bSChao Yu 		int ret;
35528bc106bSChao Yu 		/*
35628bc106bSChao Yu 		 * Try to revoke all committed pages.  This can still fail due
35728bc106bSChao Yu 		 * to lack of memory or other reasons; in that case -EAGAIN is
35828bc106bSChao Yu 		 * returned, which means the transaction has lost its integrity
35928bc106bSChao Yu 		 * and the caller should use its journal to recover, or rewrite
36028bc106bSChao Yu 		 * and commit the last transaction.  For any other error number,
36128bc106bSChao Yu 		 * revoking was done by the filesystem itself.
36228bc106bSChao Yu 		 */
36328bc106bSChao Yu 		ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
36428bc106bSChao Yu 		if (ret)
36528bc106bSChao Yu 			err = ret;
36628bc106bSChao Yu 
36728bc106bSChao Yu 		/* drop all uncommitted pages */
36828bc106bSChao Yu 		__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
36928bc106bSChao Yu 	}
37088b88a66SJaegeuk Kim 	mutex_unlock(&fi->inmem_lock);
37188b88a66SJaegeuk Kim 
3725fe45743SChao Yu 	clear_inode_flag(inode, FI_ATOMIC_COMMIT);
3735fe45743SChao Yu 
37488b88a66SJaegeuk Kim 	f2fs_unlock_op(sbi);
375edb27deeSJaegeuk Kim 	return err;
37688b88a66SJaegeuk Kim }
37788b88a66SJaegeuk Kim 
3780a8165d7SJaegeuk Kim /*
379351df4b2SJaegeuk Kim  * This function balances dirty node and dentry pages.
380351df4b2SJaegeuk Kim  * In addition, it controls garbage collection.
381351df4b2SJaegeuk Kim  */
3822c4db1a6SJaegeuk Kim void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
383351df4b2SJaegeuk Kim {
3840f348028SChao Yu #ifdef CONFIG_F2FS_FAULT_INJECTION
38555523519SChao Yu 	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
38655523519SChao Yu 		f2fs_show_injection_info(FAULT_CHECKPOINT);
3870f348028SChao Yu 		f2fs_stop_checkpoint(sbi, false);
38855523519SChao Yu 	}
3890f348028SChao Yu #endif
3900f348028SChao Yu 
3912c4db1a6SJaegeuk Kim 	if (!need)
3922c4db1a6SJaegeuk Kim 		return;
393e589c2c4SJaegeuk Kim 
394e589c2c4SJaegeuk Kim 	/* f2fs_balance_fs_bg() may be left pending */
395e589c2c4SJaegeuk Kim 	if (excess_cached_nats(sbi))
396e589c2c4SJaegeuk Kim 		f2fs_balance_fs_bg(sbi);
397e589c2c4SJaegeuk Kim 
398351df4b2SJaegeuk Kim 	/*
399029cd28cSJaegeuk Kim 	 * We should do GC, or end up writing a checkpoint, when there are too
400029cd28cSJaegeuk Kim 	 * many dirty dir/node pages and not enough free segments.
401351df4b2SJaegeuk Kim 	 */
4027f3037a5SJaegeuk Kim 	if (has_not_enough_free_secs(sbi, 0, 0)) {
403351df4b2SJaegeuk Kim 		mutex_lock(&sbi->gc_mutex);
4047702bdbeSJaegeuk Kim 		f2fs_gc(sbi, false, false);
405351df4b2SJaegeuk Kim 	}
406351df4b2SJaegeuk Kim }
407351df4b2SJaegeuk Kim 
4084660f9c0SJaegeuk Kim void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
4094660f9c0SJaegeuk Kim {
4101dcc336bSChao Yu 	/* try to shrink the extent cache when there is not enough memory */
411554df79eSJaegeuk Kim 	if (!available_free_memory(sbi, EXTENT_CACHE))
4121dcc336bSChao Yu 		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
4131dcc336bSChao Yu 
4141b38dc8eSJaegeuk Kim 	/* check the # of cached NAT entries */
4151b38dc8eSJaegeuk Kim 	if (!available_free_memory(sbi, NAT_ENTRIES))
4161b38dc8eSJaegeuk Kim 		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
4171b38dc8eSJaegeuk Kim 
41831696580SChao Yu 	if (!available_free_memory(sbi, FREE_NIDS))
419ad4edb83SJaegeuk Kim 		try_to_free_nids(sbi, MAX_FREE_NIDS);
420ad4edb83SJaegeuk Kim 	else
42122ad0b6aSJaegeuk Kim 		build_free_nids(sbi, false, false);
42231696580SChao Yu 
423f455c8a5SJaegeuk Kim 	if (!is_idle(sbi))
424f455c8a5SJaegeuk Kim 		return;
425e5e7ea3cSJaegeuk Kim 
42688a70a69SJaegeuk Kim 	/* checkpoint is the only way to shrink partial cached entries */
4274660f9c0SJaegeuk Kim 	if (!available_free_memory(sbi, NAT_ENTRIES) ||
42860b99b48SJaegeuk Kim 			!available_free_memory(sbi, INO_ENTRIES) ||
4297d768d2cSChao Yu 			excess_prefree_segs(sbi) ||
4307d768d2cSChao Yu 			excess_dirty_nats(sbi) ||
431f455c8a5SJaegeuk Kim 			f2fs_time_over(sbi, CP_TIME)) {
432e9f5b8b8SChao Yu 		if (test_opt(sbi, DATA_FLUSH)) {
433e9f5b8b8SChao Yu 			struct blk_plug plug;
434e9f5b8b8SChao Yu 
435e9f5b8b8SChao Yu 			blk_start_plug(&plug);
43636b35a0dSChao Yu 			sync_dirty_inodes(sbi, FILE_INODE);
437e9f5b8b8SChao Yu 			blk_finish_plug(&plug);
438e9f5b8b8SChao Yu 		}
4394660f9c0SJaegeuk Kim 		f2fs_sync_fs(sbi->sb, true);
44042190d2aSJaegeuk Kim 		stat_inc_bg_cp_count(sbi->stat_info);
4414660f9c0SJaegeuk Kim 	}
44236b35a0dSChao Yu }
4434660f9c0SJaegeuk Kim 
44420fda56bSKinglong Mee static int __submit_flush_wait(struct f2fs_sb_info *sbi,
44520fda56bSKinglong Mee 				struct block_device *bdev)
4463c62be17SJaegeuk Kim {
4473c62be17SJaegeuk Kim 	struct bio *bio = f2fs_bio_alloc(0);
4483c62be17SJaegeuk Kim 	int ret;
4493c62be17SJaegeuk Kim 
45009cb6464SLinus Torvalds 	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
4513c62be17SJaegeuk Kim 	bio->bi_bdev = bdev;
4523c62be17SJaegeuk Kim 	ret = submit_bio_wait(bio);
4533c62be17SJaegeuk Kim 	bio_put(bio);
45420fda56bSKinglong Mee 
45520fda56bSKinglong Mee 	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
45620fda56bSKinglong Mee 				test_opt(sbi, FLUSH_MERGE), ret);
4573c62be17SJaegeuk Kim 	return ret;
4583c62be17SJaegeuk Kim }
4593c62be17SJaegeuk Kim 
4603c62be17SJaegeuk Kim static int submit_flush_wait(struct f2fs_sb_info *sbi)
4613c62be17SJaegeuk Kim {
46220fda56bSKinglong Mee 	int ret = __submit_flush_wait(sbi, sbi->sb->s_bdev);
4633c62be17SJaegeuk Kim 	int i;
4643c62be17SJaegeuk Kim 
46520fda56bSKinglong Mee 	if (!sbi->s_ndevs || ret)
46620fda56bSKinglong Mee 		return ret;
46720fda56bSKinglong Mee 
4683c62be17SJaegeuk Kim 	for (i = 1; i < sbi->s_ndevs; i++) {
46920fda56bSKinglong Mee 		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
4703c62be17SJaegeuk Kim 		if (ret)
4713c62be17SJaegeuk Kim 			break;
4723c62be17SJaegeuk Kim 	}
4733c62be17SJaegeuk Kim 	return ret;
4743c62be17SJaegeuk Kim }
4753c62be17SJaegeuk Kim 
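/*
 * Added note (editor's summary, not from the original source): with
 * FLUSH_MERGE enabled, f2fs_issue_flush() below queues a struct flush_cmd on
 * fcc->issue_list and sleeps on cmd.wait; this thread drains the whole llist,
 * issues a single PREFLUSH per batch via submit_flush_wait(), and completes
 * every waiter with the same return code, so many concurrent fsyncs can be
 * served by one flush.
 */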
4762163d198SGu Zheng static int issue_flush_thread(void *data)
4776b4afdd7SJaegeuk Kim {
4786b4afdd7SJaegeuk Kim 	struct f2fs_sb_info *sbi = data;
479b01a9201SJaegeuk Kim 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
480a688b9d9SGu Zheng 	wait_queue_head_t *q = &fcc->flush_wait_queue;
4816b4afdd7SJaegeuk Kim repeat:
4826b4afdd7SJaegeuk Kim 	if (kthread_should_stop())
4836b4afdd7SJaegeuk Kim 		return 0;
4846b4afdd7SJaegeuk Kim 
485721bd4d5SGu Zheng 	if (!llist_empty(&fcc->issue_list)) {
4866b4afdd7SJaegeuk Kim 		struct flush_cmd *cmd, *next;
4876b4afdd7SJaegeuk Kim 		int ret;
4886b4afdd7SJaegeuk Kim 
489721bd4d5SGu Zheng 		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
490721bd4d5SGu Zheng 		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
491721bd4d5SGu Zheng 
4923c62be17SJaegeuk Kim 		ret = submit_flush_wait(sbi);
4938b8dd65fSChao Yu 		atomic_inc(&fcc->issued_flush);
4948b8dd65fSChao Yu 
495721bd4d5SGu Zheng 		llist_for_each_entry_safe(cmd, next,
496721bd4d5SGu Zheng 					  fcc->dispatch_list, llnode) {
4976b4afdd7SJaegeuk Kim 			cmd->ret = ret;
4986b4afdd7SJaegeuk Kim 			complete(&cmd->wait);
4996b4afdd7SJaegeuk Kim 		}
500a688b9d9SGu Zheng 		fcc->dispatch_list = NULL;
5016b4afdd7SJaegeuk Kim 	}
5026b4afdd7SJaegeuk Kim 
503a688b9d9SGu Zheng 	wait_event_interruptible(*q,
504721bd4d5SGu Zheng 		kthread_should_stop() || !llist_empty(&fcc->issue_list));
5056b4afdd7SJaegeuk Kim 	goto repeat;
5066b4afdd7SJaegeuk Kim }
5076b4afdd7SJaegeuk Kim 
5086b4afdd7SJaegeuk Kim int f2fs_issue_flush(struct f2fs_sb_info *sbi)
5096b4afdd7SJaegeuk Kim {
510b01a9201SJaegeuk Kim 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
511adf8d90bSChao Yu 	struct flush_cmd cmd;
5128b8dd65fSChao Yu 	int ret;
5136b4afdd7SJaegeuk Kim 
5140f7b2abdSJaegeuk Kim 	if (test_opt(sbi, NOBARRIER))
5150f7b2abdSJaegeuk Kim 		return 0;
5160f7b2abdSJaegeuk Kim 
5178b8dd65fSChao Yu 	if (!test_opt(sbi, FLUSH_MERGE)) {
5183c62be17SJaegeuk Kim 		ret = submit_flush_wait(sbi);
5198b8dd65fSChao Yu 		atomic_inc(&fcc->issued_flush);
5208b8dd65fSChao Yu 		return ret;
5218b8dd65fSChao Yu 	}
5228b8dd65fSChao Yu 
5238b8dd65fSChao Yu 	if (!atomic_read(&fcc->issing_flush)) {
5248b8dd65fSChao Yu 		atomic_inc(&fcc->issing_flush);
5258b8dd65fSChao Yu 		ret = submit_flush_wait(sbi);
5268b8dd65fSChao Yu 		atomic_dec(&fcc->issing_flush);
5278b8dd65fSChao Yu 
5288b8dd65fSChao Yu 		atomic_inc(&fcc->issued_flush);
529740432f8SJaegeuk Kim 		return ret;
530740432f8SJaegeuk Kim 	}
5316b4afdd7SJaegeuk Kim 
532adf8d90bSChao Yu 	init_completion(&cmd.wait);
5336b4afdd7SJaegeuk Kim 
5348b8dd65fSChao Yu 	atomic_inc(&fcc->issing_flush);
535721bd4d5SGu Zheng 	llist_add(&cmd.llnode, &fcc->issue_list);
5366b4afdd7SJaegeuk Kim 
537a688b9d9SGu Zheng 	if (!fcc->dispatch_list)
538a688b9d9SGu Zheng 		wake_up(&fcc->flush_wait_queue);
5396b4afdd7SJaegeuk Kim 
5405eba8c5dSJaegeuk Kim 	if (fcc->f2fs_issue_flush) {
541adf8d90bSChao Yu 		wait_for_completion(&cmd.wait);
5428b8dd65fSChao Yu 		atomic_dec(&fcc->issing_flush);
5435eba8c5dSJaegeuk Kim 	} else {
5445eba8c5dSJaegeuk Kim 		llist_del_all(&fcc->issue_list);
5458b8dd65fSChao Yu 		atomic_set(&fcc->issing_flush, 0);
5465eba8c5dSJaegeuk Kim 	}
547adf8d90bSChao Yu 
548adf8d90bSChao Yu 	return cmd.ret;
5496b4afdd7SJaegeuk Kim }
5506b4afdd7SJaegeuk Kim 
5512163d198SGu Zheng int create_flush_cmd_control(struct f2fs_sb_info *sbi)
5522163d198SGu Zheng {
5532163d198SGu Zheng 	dev_t dev = sbi->sb->s_bdev->bd_dev;
5542163d198SGu Zheng 	struct flush_cmd_control *fcc;
5552163d198SGu Zheng 	int err = 0;
5562163d198SGu Zheng 
557b01a9201SJaegeuk Kim 	if (SM_I(sbi)->fcc_info) {
558b01a9201SJaegeuk Kim 		fcc = SM_I(sbi)->fcc_info;
5595eba8c5dSJaegeuk Kim 		goto init_thread;
5605eba8c5dSJaegeuk Kim 	}
5615eba8c5dSJaegeuk Kim 
5622163d198SGu Zheng 	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
5632163d198SGu Zheng 	if (!fcc)
5642163d198SGu Zheng 		return -ENOMEM;
5658b8dd65fSChao Yu 	atomic_set(&fcc->issued_flush, 0);
5668b8dd65fSChao Yu 	atomic_set(&fcc->issing_flush, 0);
5672163d198SGu Zheng 	init_waitqueue_head(&fcc->flush_wait_queue);
568721bd4d5SGu Zheng 	init_llist_head(&fcc->issue_list);
569b01a9201SJaegeuk Kim 	SM_I(sbi)->fcc_info = fcc;
5705eba8c5dSJaegeuk Kim init_thread:
5712163d198SGu Zheng 	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
5722163d198SGu Zheng 				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
5732163d198SGu Zheng 	if (IS_ERR(fcc->f2fs_issue_flush)) {
5742163d198SGu Zheng 		err = PTR_ERR(fcc->f2fs_issue_flush);
5752163d198SGu Zheng 		kfree(fcc);
576b01a9201SJaegeuk Kim 		SM_I(sbi)->fcc_info = NULL;
5772163d198SGu Zheng 		return err;
5782163d198SGu Zheng 	}
5792163d198SGu Zheng 
5802163d198SGu Zheng 	return err;
5812163d198SGu Zheng }
5822163d198SGu Zheng 
5835eba8c5dSJaegeuk Kim void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
5842163d198SGu Zheng {
585b01a9201SJaegeuk Kim 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
5862163d198SGu Zheng 
5875eba8c5dSJaegeuk Kim 	if (fcc && fcc->f2fs_issue_flush) {
5885eba8c5dSJaegeuk Kim 		struct task_struct *flush_thread = fcc->f2fs_issue_flush;
5895eba8c5dSJaegeuk Kim 
5905eba8c5dSJaegeuk Kim 		fcc->f2fs_issue_flush = NULL;
5915eba8c5dSJaegeuk Kim 		kthread_stop(flush_thread);
5925eba8c5dSJaegeuk Kim 	}
5935eba8c5dSJaegeuk Kim 	if (free) {
5942163d198SGu Zheng 		kfree(fcc);
595b01a9201SJaegeuk Kim 		SM_I(sbi)->fcc_info = NULL;
5962163d198SGu Zheng 	}
5975eba8c5dSJaegeuk Kim }
5982163d198SGu Zheng 
599351df4b2SJaegeuk Kim static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
600351df4b2SJaegeuk Kim 		enum dirty_type dirty_type)
601351df4b2SJaegeuk Kim {
602351df4b2SJaegeuk Kim 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
603351df4b2SJaegeuk Kim 
604351df4b2SJaegeuk Kim 	/* need not be added */
605351df4b2SJaegeuk Kim 	if (IS_CURSEG(sbi, segno))
606351df4b2SJaegeuk Kim 		return;
607351df4b2SJaegeuk Kim 
608351df4b2SJaegeuk Kim 	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
609351df4b2SJaegeuk Kim 		dirty_i->nr_dirty[dirty_type]++;
610351df4b2SJaegeuk Kim 
611351df4b2SJaegeuk Kim 	if (dirty_type == DIRTY) {
612351df4b2SJaegeuk Kim 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
6134625d6aaSChangman Lee 		enum dirty_type t = sentry->type;
614b2f2c390SJaegeuk Kim 
615ec325b52SJaegeuk Kim 		if (unlikely(t >= DIRTY)) {
616ec325b52SJaegeuk Kim 			f2fs_bug_on(sbi, 1);
617ec325b52SJaegeuk Kim 			return;
618ec325b52SJaegeuk Kim 		}
6194625d6aaSChangman Lee 		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
6204625d6aaSChangman Lee 			dirty_i->nr_dirty[t]++;
621351df4b2SJaegeuk Kim 	}
622351df4b2SJaegeuk Kim }
623351df4b2SJaegeuk Kim 
624351df4b2SJaegeuk Kim static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
625351df4b2SJaegeuk Kim 		enum dirty_type dirty_type)
626351df4b2SJaegeuk Kim {
627351df4b2SJaegeuk Kim 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
628351df4b2SJaegeuk Kim 
629351df4b2SJaegeuk Kim 	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
630351df4b2SJaegeuk Kim 		dirty_i->nr_dirty[dirty_type]--;
631351df4b2SJaegeuk Kim 
632351df4b2SJaegeuk Kim 	if (dirty_type == DIRTY) {
6334625d6aaSChangman Lee 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
6344625d6aaSChangman Lee 		enum dirty_type t = sentry->type;
635b2f2c390SJaegeuk Kim 
6364625d6aaSChangman Lee 		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
637b2f2c390SJaegeuk Kim 			dirty_i->nr_dirty[t]--;
638b2f2c390SJaegeuk Kim 
639302bd348SJaegeuk Kim 		if (get_valid_blocks(sbi, segno, true) == 0)
640*4ddb1a4dSJaegeuk Kim 			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
6415ec4e49fSJaegeuk Kim 						dirty_i->victim_secmap);
642351df4b2SJaegeuk Kim 	}
643351df4b2SJaegeuk Kim }
644351df4b2SJaegeuk Kim 
6450a8165d7SJaegeuk Kim /*
646351df4b2SJaegeuk Kim  * Errors such as -ENOMEM should not occur here, since adding a dirty entry
647351df4b2SJaegeuk Kim  * to the seglist is not a critical operation.
648351df4b2SJaegeuk Kim  * If a given segment is one of the current working segments, it won't be added.
649351df4b2SJaegeuk Kim  */
6508d8451afSHaicheng Li static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
651351df4b2SJaegeuk Kim {
652351df4b2SJaegeuk Kim 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
653351df4b2SJaegeuk Kim 	unsigned short valid_blocks;
654351df4b2SJaegeuk Kim 
655351df4b2SJaegeuk Kim 	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
656351df4b2SJaegeuk Kim 		return;
657351df4b2SJaegeuk Kim 
658351df4b2SJaegeuk Kim 	mutex_lock(&dirty_i->seglist_lock);
659351df4b2SJaegeuk Kim 
660302bd348SJaegeuk Kim 	valid_blocks = get_valid_blocks(sbi, segno, false);
661351df4b2SJaegeuk Kim 
662351df4b2SJaegeuk Kim 	if (valid_blocks == 0) {
663351df4b2SJaegeuk Kim 		__locate_dirty_segment(sbi, segno, PRE);
664351df4b2SJaegeuk Kim 		__remove_dirty_segment(sbi, segno, DIRTY);
665351df4b2SJaegeuk Kim 	} else if (valid_blocks < sbi->blocks_per_seg) {
666351df4b2SJaegeuk Kim 		__locate_dirty_segment(sbi, segno, DIRTY);
667351df4b2SJaegeuk Kim 	} else {
668351df4b2SJaegeuk Kim 		/* Recovery routine with SSR needs this */
669351df4b2SJaegeuk Kim 		__remove_dirty_segment(sbi, segno, DIRTY);
670351df4b2SJaegeuk Kim 	}
671351df4b2SJaegeuk Kim 
672351df4b2SJaegeuk Kim 	mutex_unlock(&dirty_i->seglist_lock);
673351df4b2SJaegeuk Kim }
674351df4b2SJaegeuk Kim 
67515469963SJaegeuk Kim static void __add_discard_cmd(struct f2fs_sb_info *sbi,
676c81abe34SJaegeuk Kim 		struct block_device *bdev, block_t lstart,
677c81abe34SJaegeuk Kim 		block_t start, block_t len)
678275b66b0SChao Yu {
6790b54fb84SJaegeuk Kim 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
68022d375ddSChao Yu 	struct list_head *pend_list = &(dcc->discard_pend_list);
681b01a9201SJaegeuk Kim 	struct discard_cmd *dc;
682275b66b0SChao Yu 
683b01a9201SJaegeuk Kim 	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
684b01a9201SJaegeuk Kim 	INIT_LIST_HEAD(&dc->list);
685c81abe34SJaegeuk Kim 	dc->bdev = bdev;
686b01a9201SJaegeuk Kim 	dc->lstart = lstart;
687c81abe34SJaegeuk Kim 	dc->start = start;
688b01a9201SJaegeuk Kim 	dc->len = len;
68915469963SJaegeuk Kim 	dc->state = D_PREP;
690c81abe34SJaegeuk Kim 	dc->error = 0;
691b01a9201SJaegeuk Kim 	init_completion(&dc->wait);
692275b66b0SChao Yu 
69315469963SJaegeuk Kim 	mutex_lock(&dcc->cmd_lock);
69422d375ddSChao Yu 	list_add_tail(&dc->list, pend_list);
69515469963SJaegeuk Kim 	mutex_unlock(&dcc->cmd_lock);
6965f32366aSChao Yu 
6975f32366aSChao Yu 	atomic_inc(&dcc->discard_cmd_cnt);
69815469963SJaegeuk Kim }
69915469963SJaegeuk Kim 
70015469963SJaegeuk Kim static void __remove_discard_cmd(struct f2fs_sb_info *sbi, struct discard_cmd *dc)
70115469963SJaegeuk Kim {
702dcc9165dSJaegeuk Kim 	if (dc->state == D_DONE)
7038b8dd65fSChao Yu 		atomic_dec(&(SM_I(sbi)->dcc_info->issing_discard));
704dcc9165dSJaegeuk Kim 
705c81abe34SJaegeuk Kim 	if (dc->error == -EOPNOTSUPP)
706c81abe34SJaegeuk Kim 		dc->error = 0;
70715469963SJaegeuk Kim 
708c81abe34SJaegeuk Kim 	if (dc->error)
70915469963SJaegeuk Kim 		f2fs_msg(sbi->sb, KERN_INFO,
710c81abe34SJaegeuk Kim 				"Issue discard failed, ret: %d", dc->error);
71115469963SJaegeuk Kim 	list_del(&dc->list);
71215469963SJaegeuk Kim 	kmem_cache_free(discard_cmd_slab, dc);
7135f32366aSChao Yu 	atomic_dec(&SM_I(sbi)->dcc_info->discard_cmd_cnt);
714275b66b0SChao Yu }
715275b66b0SChao Yu 
716c81abe34SJaegeuk Kim static void f2fs_submit_discard_endio(struct bio *bio)
717c81abe34SJaegeuk Kim {
718c81abe34SJaegeuk Kim 	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
719c81abe34SJaegeuk Kim 
720c81abe34SJaegeuk Kim 	dc->error = bio->bi_error;
721c81abe34SJaegeuk Kim 	dc->state = D_DONE;
722fa64a003SChao Yu 	complete(&dc->wait);
723c81abe34SJaegeuk Kim 	bio_put(bio);
724c81abe34SJaegeuk Kim }
725c81abe34SJaegeuk Kim 
726c81abe34SJaegeuk Kim /* this function is copied from blkdev_issue_discard() in block/blk-lib.c */
727c81abe34SJaegeuk Kim static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
728c81abe34SJaegeuk Kim 				struct discard_cmd *dc)
729c81abe34SJaegeuk Kim {
730c81abe34SJaegeuk Kim 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
731c81abe34SJaegeuk Kim 	struct bio *bio = NULL;
732c81abe34SJaegeuk Kim 
733c81abe34SJaegeuk Kim 	if (dc->state != D_PREP)
734c81abe34SJaegeuk Kim 		return;
735c81abe34SJaegeuk Kim 
736c81abe34SJaegeuk Kim 	dc->error = __blkdev_issue_discard(dc->bdev,
737c81abe34SJaegeuk Kim 				SECTOR_FROM_BLOCK(dc->start),
738c81abe34SJaegeuk Kim 				SECTOR_FROM_BLOCK(dc->len),
739c81abe34SJaegeuk Kim 				GFP_NOFS, 0, &bio);
740c81abe34SJaegeuk Kim 	if (!dc->error) {
741c81abe34SJaegeuk Kim 		/* must be set before submission so D_DONE is not seen right away */
742c81abe34SJaegeuk Kim 		dc->state = D_SUBMIT;
7438b8dd65fSChao Yu 		atomic_inc(&dcc->issued_discard);
7448b8dd65fSChao Yu 		atomic_inc(&dcc->issing_discard);
745c81abe34SJaegeuk Kim 		if (bio) {
746c81abe34SJaegeuk Kim 			bio->bi_private = dc;
747c81abe34SJaegeuk Kim 			bio->bi_end_io = f2fs_submit_discard_endio;
748c81abe34SJaegeuk Kim 			bio->bi_opf |= REQ_SYNC;
749c81abe34SJaegeuk Kim 			submit_bio(bio);
75022d375ddSChao Yu 			list_move_tail(&dc->list, &dcc->discard_wait_list);
751c81abe34SJaegeuk Kim 		}
752c81abe34SJaegeuk Kim 	} else {
753c81abe34SJaegeuk Kim 		__remove_discard_cmd(sbi, dc);
754c81abe34SJaegeuk Kim 	}
755c81abe34SJaegeuk Kim }
756c81abe34SJaegeuk Kim 
757c81abe34SJaegeuk Kim static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
758c81abe34SJaegeuk Kim 		struct block_device *bdev, block_t blkstart, block_t blklen)
759c81abe34SJaegeuk Kim {
760c81abe34SJaegeuk Kim 	block_t lblkstart = blkstart;
761c81abe34SJaegeuk Kim 
762c81abe34SJaegeuk Kim 	trace_f2fs_issue_discard(bdev, blkstart, blklen);
763c81abe34SJaegeuk Kim 
764c81abe34SJaegeuk Kim 	if (sbi->s_ndevs) {
765c81abe34SJaegeuk Kim 		int devi = f2fs_target_device_index(sbi, blkstart);
766c81abe34SJaegeuk Kim 
767c81abe34SJaegeuk Kim 		blkstart -= FDEV(devi).start_blk;
768c81abe34SJaegeuk Kim 	}
769c81abe34SJaegeuk Kim 	__add_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen);
770c81abe34SJaegeuk Kim 	wake_up(&SM_I(sbi)->dcc_info->discard_wait_queue);
771c81abe34SJaegeuk Kim 	return 0;
772c81abe34SJaegeuk Kim }
773c81abe34SJaegeuk Kim 
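/*
 * Worked example (added commentary; the numbers are hypothetical and assume
 * 512 blocks per segment with the main area starting at block 0): a pending
 * discard covers lstart 1000, len 600, i.e. blocks [1000, 1600).  If blkaddr
 * 1030 must be preserved, end_block is 1536 (start of the segment following
 * blkaddr).  The head piece [1000, 1030) is shorter than the tail piece
 * [1536, 1600), so the command is trimmed from the front: lstart/start move
 * to 1536 and len becomes 64.  Had the head been the longer piece, the tail
 * would have been cut instead by setting len = blkaddr - lstart.
 */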
7743d6a650fSYunlei He static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
7753d6a650fSYunlei He 				struct discard_cmd *dc, block_t blkaddr)
7763d6a650fSYunlei He {
7773d6a650fSYunlei He 	block_t end_block = START_BLOCK(sbi, GET_SEGNO(sbi, blkaddr) + 1);
7783d6a650fSYunlei He 
7793d6a650fSYunlei He 	if (dc->state == D_DONE || dc->lstart + dc->len <= end_block) {
7803d6a650fSYunlei He 		__remove_discard_cmd(sbi, dc);
7813d6a650fSYunlei He 		return;
7823d6a650fSYunlei He 	}
7833d6a650fSYunlei He 
7843d6a650fSYunlei He 	if (blkaddr - dc->lstart < dc->lstart + dc->len - end_block) {
7853d6a650fSYunlei He 		dc->start += (end_block - dc->lstart);
7863d6a650fSYunlei He 		dc->len -= (end_block - dc->lstart);
7873d6a650fSYunlei He 		dc->lstart = end_block;
7883d6a650fSYunlei He 	} else {
7893d6a650fSYunlei He 		dc->len = blkaddr - dc->lstart;
7903d6a650fSYunlei He 	}
7913d6a650fSYunlei He }
7923d6a650fSYunlei He 
7934e6a8d9bSJaegeuk Kim /* This should be covered by the global mutex, &sit_i->sentry_lock */
7944e6a8d9bSJaegeuk Kim void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
795275b66b0SChao Yu {
7960b54fb84SJaegeuk Kim 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
79722d375ddSChao Yu 	struct list_head *pend_list = &(dcc->discard_pend_list);
79822d375ddSChao Yu 	struct list_head *wait_list = &(dcc->discard_wait_list);
799b01a9201SJaegeuk Kim 	struct discard_cmd *dc, *tmp;
800275b66b0SChao Yu 
80115469963SJaegeuk Kim 	mutex_lock(&dcc->cmd_lock);
80240465257SJaegeuk Kim 
80322d375ddSChao Yu 	list_for_each_entry_safe(dc, tmp, pend_list, list) {
80422d375ddSChao Yu 		if (dc->lstart <= blkaddr && blkaddr < dc->lstart + dc->len)
80522d375ddSChao Yu 			__punch_discard_cmd(sbi, dc, blkaddr);
8064e6a8d9bSJaegeuk Kim 	}
8074e6a8d9bSJaegeuk Kim 
80822d375ddSChao Yu 	list_for_each_entry_safe(dc, tmp, wait_list, list) {
80915469963SJaegeuk Kim 		if (dc->lstart <= blkaddr && blkaddr < dc->lstart + dc->len) {
81015469963SJaegeuk Kim 			wait_for_completion_io(&dc->wait);
8113d6a650fSYunlei He 			__punch_discard_cmd(sbi, dc, blkaddr);
812275b66b0SChao Yu 		}
813275b66b0SChao Yu 	}
81440465257SJaegeuk Kim 
815d431413fSChao Yu 	mutex_unlock(&dcc->cmd_lock);
816d431413fSChao Yu }
817d431413fSChao Yu 
818d431413fSChao Yu /* This comes from f2fs_put_super */
819d431413fSChao Yu void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
820d431413fSChao Yu {
821d431413fSChao Yu 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
822d431413fSChao Yu 	struct list_head *pend_list = &(dcc->discard_pend_list);
823d431413fSChao Yu 	struct list_head *wait_list = &(dcc->discard_wait_list);
824d431413fSChao Yu 	struct discard_cmd *dc, *tmp;
825d431413fSChao Yu 	struct blk_plug plug;
826d431413fSChao Yu 
827d431413fSChao Yu 	mutex_lock(&dcc->cmd_lock);
828d431413fSChao Yu 
82922d375ddSChao Yu 	blk_start_plug(&plug);
83022d375ddSChao Yu 	list_for_each_entry_safe(dc, tmp, pend_list, list)
83122d375ddSChao Yu 		__submit_discard_cmd(sbi, dc);
83222d375ddSChao Yu 	blk_finish_plug(&plug);
83322d375ddSChao Yu 
83440465257SJaegeuk Kim 	list_for_each_entry_safe(dc, tmp, wait_list, list) {
83540465257SJaegeuk Kim 		wait_for_completion_io(&dc->wait);
83640465257SJaegeuk Kim 		__remove_discard_cmd(sbi, dc);
83740465257SJaegeuk Kim 	}
838d431413fSChao Yu 
83915469963SJaegeuk Kim 	mutex_unlock(&dcc->cmd_lock);
84015469963SJaegeuk Kim }
841275b66b0SChao Yu 
84215469963SJaegeuk Kim static int issue_discard_thread(void *data)
84315469963SJaegeuk Kim {
84415469963SJaegeuk Kim 	struct f2fs_sb_info *sbi = data;
84515469963SJaegeuk Kim 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
84615469963SJaegeuk Kim 	wait_queue_head_t *q = &dcc->discard_wait_queue;
84722d375ddSChao Yu 	struct list_head *pend_list = &dcc->discard_pend_list;
84822d375ddSChao Yu 	struct list_head *wait_list = &dcc->discard_wait_list;
84915469963SJaegeuk Kim 	struct discard_cmd *dc, *tmp;
85015469963SJaegeuk Kim 	struct blk_plug plug;
85115469963SJaegeuk Kim 	int iter = 0;
85215469963SJaegeuk Kim repeat:
85315469963SJaegeuk Kim 	if (kthread_should_stop())
85415469963SJaegeuk Kim 		return 0;
85515469963SJaegeuk Kim 
85615469963SJaegeuk Kim 	blk_start_plug(&plug);
85715469963SJaegeuk Kim 
85815469963SJaegeuk Kim 	mutex_lock(&dcc->cmd_lock);
85922d375ddSChao Yu 	list_for_each_entry_safe(dc, tmp, pend_list, list) {
86022d375ddSChao Yu 		f2fs_bug_on(sbi, dc->state != D_PREP);
861c81abe34SJaegeuk Kim 
862c81abe34SJaegeuk Kim 		if (is_idle(sbi))
863c81abe34SJaegeuk Kim 			__submit_discard_cmd(sbi, dc);
864c81abe34SJaegeuk Kim 
86522d375ddSChao Yu 		if (iter++ > DISCARD_ISSUE_RATE)
86615469963SJaegeuk Kim 			break;
86722d375ddSChao Yu 	}
86822d375ddSChao Yu 
86922d375ddSChao Yu 	list_for_each_entry_safe(dc, tmp, wait_list, list) {
870fa64a003SChao Yu 		if (dc->state == D_DONE) {
871fa64a003SChao Yu 			wait_for_completion_io(&dc->wait);
87215469963SJaegeuk Kim 			__remove_discard_cmd(sbi, dc);
87315469963SJaegeuk Kim 		}
874fa64a003SChao Yu 	}
87515469963SJaegeuk Kim 	mutex_unlock(&dcc->cmd_lock);
87615469963SJaegeuk Kim 
87715469963SJaegeuk Kim 	blk_finish_plug(&plug);
87815469963SJaegeuk Kim 
87915469963SJaegeuk Kim 	iter = 0;
88015469963SJaegeuk Kim 	congestion_wait(BLK_RW_SYNC, HZ/50);
88115469963SJaegeuk Kim 
88222d375ddSChao Yu 	wait_event_interruptible(*q, kthread_should_stop() ||
88322d375ddSChao Yu 			!list_empty(pend_list) || !list_empty(wait_list));
88415469963SJaegeuk Kim 	goto repeat;
88515469963SJaegeuk Kim }
88615469963SJaegeuk Kim 
887f46e8809SDamien Le Moal #ifdef CONFIG_BLK_DEV_ZONED
8883c62be17SJaegeuk Kim static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
8893c62be17SJaegeuk Kim 		struct block_device *bdev, block_t blkstart, block_t blklen)
890f46e8809SDamien Le Moal {
89192592285SJaegeuk Kim 	sector_t sector, nr_sects;
89210a875f8SKinglong Mee 	block_t lblkstart = blkstart;
8933c62be17SJaegeuk Kim 	int devi = 0;
894f46e8809SDamien Le Moal 
8953c62be17SJaegeuk Kim 	if (sbi->s_ndevs) {
8963c62be17SJaegeuk Kim 		devi = f2fs_target_device_index(sbi, blkstart);
8973c62be17SJaegeuk Kim 		blkstart -= FDEV(devi).start_blk;
8983c62be17SJaegeuk Kim 	}
899f46e8809SDamien Le Moal 
900f46e8809SDamien Le Moal 	/*
901f46e8809SDamien Le Moal 	 * We need to know the type of the zone: for conventional zones,
902f46e8809SDamien Le Moal 	 * use regular discard if the drive supports it. For sequential
903f46e8809SDamien Le Moal 	 * zones, reset the zone write pointer.
904f46e8809SDamien Le Moal 	 */
9053c62be17SJaegeuk Kim 	switch (get_blkz_type(sbi, bdev, blkstart)) {
906f46e8809SDamien Le Moal 
907f46e8809SDamien Le Moal 	case BLK_ZONE_TYPE_CONVENTIONAL:
908f46e8809SDamien Le Moal 		if (!blk_queue_discard(bdev_get_queue(bdev)))
909f46e8809SDamien Le Moal 			return 0;
910c81abe34SJaegeuk Kim 		return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
911f46e8809SDamien Le Moal 	case BLK_ZONE_TYPE_SEQWRITE_REQ:
912f46e8809SDamien Le Moal 	case BLK_ZONE_TYPE_SEQWRITE_PREF:
91392592285SJaegeuk Kim 		sector = SECTOR_FROM_BLOCK(blkstart);
91492592285SJaegeuk Kim 		nr_sects = SECTOR_FROM_BLOCK(blklen);
91592592285SJaegeuk Kim 
91692592285SJaegeuk Kim 		if (sector & (bdev_zone_sectors(bdev) - 1) ||
91792592285SJaegeuk Kim 				nr_sects != bdev_zone_sectors(bdev)) {
91892592285SJaegeuk Kim 			f2fs_msg(sbi->sb, KERN_INFO,
91992592285SJaegeuk Kim 				"(%d) %s: Unaligned discard attempted (block %x + %x)",
92092592285SJaegeuk Kim 				devi, sbi->s_ndevs ? FDEV(devi).path: "",
92192592285SJaegeuk Kim 				blkstart, blklen);
92292592285SJaegeuk Kim 			return -EIO;
92392592285SJaegeuk Kim 		}
924d50aaeecSJaegeuk Kim 		trace_f2fs_issue_reset_zone(bdev, blkstart);
925f46e8809SDamien Le Moal 		return blkdev_reset_zones(bdev, sector,
926f46e8809SDamien Le Moal 					  nr_sects, GFP_NOFS);
927f46e8809SDamien Le Moal 	default:
928f46e8809SDamien Le Moal 		/* Unknown zone type: broken device ? */
929f46e8809SDamien Le Moal 		return -EIO;
930f46e8809SDamien Le Moal 	}
931f46e8809SDamien Le Moal }
932f46e8809SDamien Le Moal #endif
933f46e8809SDamien Le Moal 
9343c62be17SJaegeuk Kim static int __issue_discard_async(struct f2fs_sb_info *sbi,
9353c62be17SJaegeuk Kim 		struct block_device *bdev, block_t blkstart, block_t blklen)
9363c62be17SJaegeuk Kim {
9373c62be17SJaegeuk Kim #ifdef CONFIG_BLK_DEV_ZONED
9383c62be17SJaegeuk Kim 	if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
9393c62be17SJaegeuk Kim 				bdev_zoned_model(bdev) != BLK_ZONED_NONE)
9403c62be17SJaegeuk Kim 		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
9413c62be17SJaegeuk Kim #endif
942c81abe34SJaegeuk Kim 	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
9433c62be17SJaegeuk Kim }
9443c62be17SJaegeuk Kim 
9451e87a78dSJaegeuk Kim static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
94637208879SJaegeuk Kim 				block_t blkstart, block_t blklen)
94737208879SJaegeuk Kim {
9483c62be17SJaegeuk Kim 	sector_t start = blkstart, len = 0;
9493c62be17SJaegeuk Kim 	struct block_device *bdev;
950a66cdd98SJaegeuk Kim 	struct seg_entry *se;
951a66cdd98SJaegeuk Kim 	unsigned int offset;
952a66cdd98SJaegeuk Kim 	block_t i;
9533c62be17SJaegeuk Kim 	int err = 0;
954a66cdd98SJaegeuk Kim 
9553c62be17SJaegeuk Kim 	bdev = f2fs_target_device(sbi, blkstart, NULL);
9563c62be17SJaegeuk Kim 
9573c62be17SJaegeuk Kim 	for (i = blkstart; i < blkstart + blklen; i++, len++) {
9583c62be17SJaegeuk Kim 		if (i != start) {
9593c62be17SJaegeuk Kim 			struct block_device *bdev2 =
9603c62be17SJaegeuk Kim 				f2fs_target_device(sbi, i, NULL);
9613c62be17SJaegeuk Kim 
9623c62be17SJaegeuk Kim 			if (bdev2 != bdev) {
9633c62be17SJaegeuk Kim 				err = __issue_discard_async(sbi, bdev,
9643c62be17SJaegeuk Kim 						start, len);
9653c62be17SJaegeuk Kim 				if (err)
9663c62be17SJaegeuk Kim 					return err;
9673c62be17SJaegeuk Kim 				bdev = bdev2;
9683c62be17SJaegeuk Kim 				start = i;
9693c62be17SJaegeuk Kim 				len = 0;
9703c62be17SJaegeuk Kim 			}
9713c62be17SJaegeuk Kim 		}
9723c62be17SJaegeuk Kim 
973a66cdd98SJaegeuk Kim 		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
974a66cdd98SJaegeuk Kim 		offset = GET_BLKOFF_FROM_SEG0(sbi, i);
975a66cdd98SJaegeuk Kim 
976a66cdd98SJaegeuk Kim 		if (!f2fs_test_and_set_bit(offset, se->discard_map))
977a66cdd98SJaegeuk Kim 			sbi->discard_blks--;
978a66cdd98SJaegeuk Kim 	}
979f46e8809SDamien Le Moal 
9803c62be17SJaegeuk Kim 	if (len)
9813c62be17SJaegeuk Kim 		err = __issue_discard_async(sbi, bdev, start, len);
9823c62be17SJaegeuk Kim 	return err;
9831e87a78dSJaegeuk Kim }
9841e87a78dSJaegeuk Kim 
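/*
 * Added note on the bitmap arithmetic below (editor's summary): for a normal
 * checkpoint, dmap = (cur_map ^ ckpt_map) & ckpt_map, i.e. blocks that were
 * valid at the last checkpoint but have been invalidated since - exactly the
 * blocks that become discardable once this checkpoint completes.  For
 * CP_DISCARD (FITRIM), dmap = ~ckpt_map & ~discard_map, i.e. blocks that are
 * free on disk and have not been discarded yet.
 */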
98525290fa5SJaegeuk Kim static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
98625290fa5SJaegeuk Kim 							bool check_only)
987adf4983bSJaegeuk Kim {
988b2955550SJaegeuk Kim 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
989b2955550SJaegeuk Kim 	int max_blocks = sbi->blocks_per_seg;
9904b2fecc8SJaegeuk Kim 	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
991b2955550SJaegeuk Kim 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
992b2955550SJaegeuk Kim 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
993a66cdd98SJaegeuk Kim 	unsigned long *discard_map = (unsigned long *)se->discard_map;
99460a3b782SJaegeuk Kim 	unsigned long *dmap = SIT_I(sbi)->tmp_map;
995b2955550SJaegeuk Kim 	unsigned int start = 0, end = -1;
9964b2fecc8SJaegeuk Kim 	bool force = (cpc->reason == CP_DISCARD);
997a7eeb823SChao Yu 	struct discard_entry *de = NULL;
998a7eeb823SChao Yu 	struct list_head *head = &SM_I(sbi)->dcc_info->discard_entry_list;
999b2955550SJaegeuk Kim 	int i;
1000b2955550SJaegeuk Kim 
10013e025740SJaegeuk Kim 	if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
100225290fa5SJaegeuk Kim 		return false;
1003b2955550SJaegeuk Kim 
1004a66cdd98SJaegeuk Kim 	if (!force) {
1005a66cdd98SJaegeuk Kim 		if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
10060b54fb84SJaegeuk Kim 			SM_I(sbi)->dcc_info->nr_discards >=
10070b54fb84SJaegeuk Kim 				SM_I(sbi)->dcc_info->max_discards)
100825290fa5SJaegeuk Kim 			return false;
10094b2fecc8SJaegeuk Kim 	}
1010b2955550SJaegeuk Kim 
1011b2955550SJaegeuk Kim 	/* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
1012b2955550SJaegeuk Kim 	for (i = 0; i < entries; i++)
1013a66cdd98SJaegeuk Kim 		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
1014d7bc2484SJaegeuk Kim 				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
1015b2955550SJaegeuk Kim 
10160b54fb84SJaegeuk Kim 	while (force || SM_I(sbi)->dcc_info->nr_discards <=
10170b54fb84SJaegeuk Kim 				SM_I(sbi)->dcc_info->max_discards) {
1018b2955550SJaegeuk Kim 		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
1019b2955550SJaegeuk Kim 		if (start >= max_blocks)
1020b2955550SJaegeuk Kim 			break;
1021b2955550SJaegeuk Kim 
1022b2955550SJaegeuk Kim 		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
1023c7b41e16SYunlei He 		if (force && start && end != max_blocks
1024c7b41e16SYunlei He 					&& (end - start) < cpc->trim_minlen)
1025c7b41e16SYunlei He 			continue;
1026c7b41e16SYunlei He 
102725290fa5SJaegeuk Kim 		if (check_only)
102825290fa5SJaegeuk Kim 			return true;
102925290fa5SJaegeuk Kim 
1030a7eeb823SChao Yu 		if (!de) {
1031a7eeb823SChao Yu 			de = f2fs_kmem_cache_alloc(discard_entry_slab,
1032a7eeb823SChao Yu 								GFP_F2FS_ZERO);
1033a7eeb823SChao Yu 			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
1034a7eeb823SChao Yu 			list_add_tail(&de->list, head);
1035a7eeb823SChao Yu 		}
1036a7eeb823SChao Yu 
1037a7eeb823SChao Yu 		for (i = start; i < end; i++)
1038a7eeb823SChao Yu 			__set_bit_le(i, (void *)de->discard_map);
1039a7eeb823SChao Yu 
1040a7eeb823SChao Yu 		SM_I(sbi)->dcc_info->nr_discards += end - start;
1041b2955550SJaegeuk Kim 	}
104225290fa5SJaegeuk Kim 	return false;
1043b2955550SJaegeuk Kim }
1044b2955550SJaegeuk Kim 
10454b2fecc8SJaegeuk Kim void release_discard_addrs(struct f2fs_sb_info *sbi)
10464b2fecc8SJaegeuk Kim {
10470b54fb84SJaegeuk Kim 	struct list_head *head = &(SM_I(sbi)->dcc_info->discard_entry_list);
10484b2fecc8SJaegeuk Kim 	struct discard_entry *entry, *this;
10494b2fecc8SJaegeuk Kim 
10504b2fecc8SJaegeuk Kim 	/* drop caches */
10514b2fecc8SJaegeuk Kim 	list_for_each_entry_safe(entry, this, head, list) {
10524b2fecc8SJaegeuk Kim 		list_del(&entry->list);
10534b2fecc8SJaegeuk Kim 		kmem_cache_free(discard_entry_slab, entry);
10544b2fecc8SJaegeuk Kim 	}
10554b2fecc8SJaegeuk Kim }
10564b2fecc8SJaegeuk Kim 
10570a8165d7SJaegeuk Kim /*
1058351df4b2SJaegeuk Kim  * clear_prefree_segments() should be called after the checkpoint is done.
1059351df4b2SJaegeuk Kim  */
1060351df4b2SJaegeuk Kim static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
1061351df4b2SJaegeuk Kim {
1062351df4b2SJaegeuk Kim 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1063b65ee148SChao Yu 	unsigned int segno;
1064351df4b2SJaegeuk Kim 
1065351df4b2SJaegeuk Kim 	mutex_lock(&dirty_i->seglist_lock);
10667cd8558bSJaegeuk Kim 	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
1067351df4b2SJaegeuk Kim 		__set_test_and_free(sbi, segno);
1068351df4b2SJaegeuk Kim 	mutex_unlock(&dirty_i->seglist_lock);
1069351df4b2SJaegeuk Kim }
1070351df4b2SJaegeuk Kim 
1071836b5a63SJaegeuk Kim void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1072351df4b2SJaegeuk Kim {
10730b54fb84SJaegeuk Kim 	struct list_head *head = &(SM_I(sbi)->dcc_info->discard_entry_list);
10742d7b822aSChao Yu 	struct discard_entry *entry, *this;
1075351df4b2SJaegeuk Kim 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
107629e59c14SChangman Lee 	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
107729e59c14SChangman Lee 	unsigned int start = 0, end = -1;
107836abef4eSJaegeuk Kim 	unsigned int secno, start_segno;
1079c24a0fd6SChao Yu 	bool force = (cpc->reason == CP_DISCARD);
1080351df4b2SJaegeuk Kim 
1081351df4b2SJaegeuk Kim 	mutex_lock(&dirty_i->seglist_lock);
108229e59c14SChangman Lee 
1083351df4b2SJaegeuk Kim 	while (1) {
108429e59c14SChangman Lee 		int i;
10857cd8558bSJaegeuk Kim 		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
10867cd8558bSJaegeuk Kim 		if (start >= MAIN_SEGS(sbi))
1087351df4b2SJaegeuk Kim 			break;
10887cd8558bSJaegeuk Kim 		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
10897cd8558bSJaegeuk Kim 								start + 1);
1090351df4b2SJaegeuk Kim 
109129e59c14SChangman Lee 		for (i = start; i < end; i++)
109229e59c14SChangman Lee 			clear_bit(i, prefree_map);
1093351df4b2SJaegeuk Kim 
109429e59c14SChangman Lee 		dirty_i->nr_dirty[PRE] -= end - start;
109529e59c14SChangman Lee 
1096650d3c4eSYunlei He 		if (!test_opt(sbi, DISCARD))
1097650d3c4eSYunlei He 			continue;
1098650d3c4eSYunlei He 
1099650d3c4eSYunlei He 		if (force && start >= cpc->trim_start &&
1100650d3c4eSYunlei He 					(end - 1) <= cpc->trim_end)
110129e59c14SChangman Lee 				continue;
110229e59c14SChangman Lee 
110336abef4eSJaegeuk Kim 		if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
110437208879SJaegeuk Kim 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
110537208879SJaegeuk Kim 				(end - start) << sbi->log_blocks_per_seg);
110636abef4eSJaegeuk Kim 			continue;
110736abef4eSJaegeuk Kim 		}
110836abef4eSJaegeuk Kim next:
1109*4ddb1a4dSJaegeuk Kim 		secno = GET_SEC_FROM_SEG(sbi, start);
1110*4ddb1a4dSJaegeuk Kim 		start_segno = GET_SEG_FROM_SEC(sbi, secno);
111136abef4eSJaegeuk Kim 		if (!IS_CURSEC(sbi, secno) &&
1112302bd348SJaegeuk Kim 			!get_valid_blocks(sbi, start, true))
111336abef4eSJaegeuk Kim 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
111436abef4eSJaegeuk Kim 				sbi->segs_per_sec << sbi->log_blocks_per_seg);
111536abef4eSJaegeuk Kim 
111636abef4eSJaegeuk Kim 		start = start_segno + sbi->segs_per_sec;
111736abef4eSJaegeuk Kim 		if (start < end)
111836abef4eSJaegeuk Kim 			goto next;
11198b107f5bSJaegeuk Kim 		else
11208b107f5bSJaegeuk Kim 			end = start - 1;
1121351df4b2SJaegeuk Kim 	}
1122351df4b2SJaegeuk Kim 	mutex_unlock(&dirty_i->seglist_lock);
1123b2955550SJaegeuk Kim 
1124b2955550SJaegeuk Kim 	/* send small discards */
11252d7b822aSChao Yu 	list_for_each_entry_safe(entry, this, head, list) {
1126a7eeb823SChao Yu 		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
1127a7eeb823SChao Yu 		bool is_valid = test_bit_le(0, entry->discard_map);
1128a7eeb823SChao Yu 
1129a7eeb823SChao Yu find_next:
1130a7eeb823SChao Yu 		if (is_valid) {
1131a7eeb823SChao Yu 			next_pos = find_next_zero_bit_le(entry->discard_map,
1132a7eeb823SChao Yu 					sbi->blocks_per_seg, cur_pos);
1133a7eeb823SChao Yu 			len = next_pos - cur_pos;
1134a7eeb823SChao Yu 
1135a7eeb823SChao Yu 			if (force && len < cpc->trim_minlen)
1136836b5a63SJaegeuk Kim 				goto skip;
1137a7eeb823SChao Yu 
1138a7eeb823SChao Yu 			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
1139a7eeb823SChao Yu 									len);
1140a7eeb823SChao Yu 			cpc->trimmed += len;
1141a7eeb823SChao Yu 			total_len += len;
1142a7eeb823SChao Yu 		} else {
1143a7eeb823SChao Yu 			next_pos = find_next_bit_le(entry->discard_map,
1144a7eeb823SChao Yu 					sbi->blocks_per_seg, cur_pos);
1145a7eeb823SChao Yu 		}
1146836b5a63SJaegeuk Kim skip:
1147a7eeb823SChao Yu 		cur_pos = next_pos;
1148a7eeb823SChao Yu 		is_valid = !is_valid;
1149a7eeb823SChao Yu 
1150a7eeb823SChao Yu 		if (cur_pos < sbi->blocks_per_seg)
1151a7eeb823SChao Yu 			goto find_next;
1152a7eeb823SChao Yu 
1153b2955550SJaegeuk Kim 		list_del(&entry->list);
1154a7eeb823SChao Yu 		SM_I(sbi)->dcc_info->nr_discards -= total_len;
1155b2955550SJaegeuk Kim 		kmem_cache_free(discard_entry_slab, entry);
1156b2955550SJaegeuk Kim 	}
1157351df4b2SJaegeuk Kim }
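
/*
 * Added walk-through (editor's commentary): the "send small discards" loop
 * above alternates between runs of set and clear bits in entry->discard_map.
 * With a hypothetical map 1110 0111 1..., the first pass (is_valid == true)
 * finds the first zero, issues a discard of length 3, the next pass skips the
 * two-bit zero run, and so on until cur_pos reaches blocks_per_seg.
 * total_len accumulates only what was actually issued, so nr_discards is
 * decremented by the matching amount.
 */
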
1158351df4b2SJaegeuk Kim 
11598ed59745SJaegeuk Kim static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
11600b54fb84SJaegeuk Kim {
116115469963SJaegeuk Kim 	dev_t dev = sbi->sb->s_bdev->bd_dev;
11620b54fb84SJaegeuk Kim 	struct discard_cmd_control *dcc;
11630b54fb84SJaegeuk Kim 	int err = 0;
11640b54fb84SJaegeuk Kim 
11650b54fb84SJaegeuk Kim 	if (SM_I(sbi)->dcc_info) {
11660b54fb84SJaegeuk Kim 		dcc = SM_I(sbi)->dcc_info;
11670b54fb84SJaegeuk Kim 		goto init_thread;
11680b54fb84SJaegeuk Kim 	}
11690b54fb84SJaegeuk Kim 
11700b54fb84SJaegeuk Kim 	dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL);
11710b54fb84SJaegeuk Kim 	if (!dcc)
11720b54fb84SJaegeuk Kim 		return -ENOMEM;
11730b54fb84SJaegeuk Kim 
11740b54fb84SJaegeuk Kim 	INIT_LIST_HEAD(&dcc->discard_entry_list);
117522d375ddSChao Yu 	INIT_LIST_HEAD(&dcc->discard_pend_list);
117622d375ddSChao Yu 	INIT_LIST_HEAD(&dcc->discard_wait_list);
117715469963SJaegeuk Kim 	mutex_init(&dcc->cmd_lock);
11788b8dd65fSChao Yu 	atomic_set(&dcc->issued_discard, 0);
11798b8dd65fSChao Yu 	atomic_set(&dcc->issing_discard, 0);
11805f32366aSChao Yu 	atomic_set(&dcc->discard_cmd_cnt, 0);
11810b54fb84SJaegeuk Kim 	dcc->nr_discards = 0;
11820b54fb84SJaegeuk Kim 	dcc->max_discards = 0;
11830b54fb84SJaegeuk Kim 
118415469963SJaegeuk Kim 	init_waitqueue_head(&dcc->discard_wait_queue);
11850b54fb84SJaegeuk Kim 	SM_I(sbi)->dcc_info = dcc;
11860b54fb84SJaegeuk Kim init_thread:
118715469963SJaegeuk Kim 	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
118815469963SJaegeuk Kim 				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
118915469963SJaegeuk Kim 	if (IS_ERR(dcc->f2fs_issue_discard)) {
119015469963SJaegeuk Kim 		err = PTR_ERR(dcc->f2fs_issue_discard);
119115469963SJaegeuk Kim 		kfree(dcc);
119215469963SJaegeuk Kim 		SM_I(sbi)->dcc_info = NULL;
119315469963SJaegeuk Kim 		return err;
119415469963SJaegeuk Kim 	}
119515469963SJaegeuk Kim 
11960b54fb84SJaegeuk Kim 	return err;
11970b54fb84SJaegeuk Kim }
11980b54fb84SJaegeuk Kim 
1199f099405fSChao Yu static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
12000b54fb84SJaegeuk Kim {
12010b54fb84SJaegeuk Kim 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
12020b54fb84SJaegeuk Kim 
1203f099405fSChao Yu 	if (!dcc)
1204f099405fSChao Yu 		return;
1205f099405fSChao Yu 
1206f099405fSChao Yu 	if (dcc->f2fs_issue_discard) {
120715469963SJaegeuk Kim 		struct task_struct *discard_thread = dcc->f2fs_issue_discard;
120815469963SJaegeuk Kim 
120915469963SJaegeuk Kim 		dcc->f2fs_issue_discard = NULL;
121015469963SJaegeuk Kim 		kthread_stop(discard_thread);
121115469963SJaegeuk Kim 	}
1212f099405fSChao Yu 
12130b54fb84SJaegeuk Kim 	kfree(dcc);
12140b54fb84SJaegeuk Kim 	SM_I(sbi)->dcc_info = NULL;
12150b54fb84SJaegeuk Kim }
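
/*
 * Usage sketch for the two helpers above (hypothetical caller, shown only for
 * illustration): they are expected to be paired around the lifetime of the
 * segment manager, roughly as in
 *
 *	err = create_discard_cmd_control(sbi);
 *	if (err)
 *		return err;
 *	...
 *	destroy_discard_cmd_control(sbi);
 *
 * If dcc_info already exists (e.g. across a remount, presumably), the create
 * path skips the allocation and only restarts the discard kthread.
 */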
12160b54fb84SJaegeuk Kim 
1217184a5cd2SChao Yu static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
1218351df4b2SJaegeuk Kim {
1219351df4b2SJaegeuk Kim 	struct sit_info *sit_i = SIT_I(sbi);
1220184a5cd2SChao Yu 
1221184a5cd2SChao Yu 	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
1222351df4b2SJaegeuk Kim 		sit_i->dirty_sentries++;
1223184a5cd2SChao Yu 		return false;
1224184a5cd2SChao Yu 	}
1225184a5cd2SChao Yu 
1226184a5cd2SChao Yu 	return true;
1227351df4b2SJaegeuk Kim }
1228351df4b2SJaegeuk Kim 
1229351df4b2SJaegeuk Kim static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
1230351df4b2SJaegeuk Kim 					unsigned int segno, int modified)
1231351df4b2SJaegeuk Kim {
1232351df4b2SJaegeuk Kim 	struct seg_entry *se = get_seg_entry(sbi, segno);
1233351df4b2SJaegeuk Kim 	se->type = type;
1234351df4b2SJaegeuk Kim 	if (modified)
1235351df4b2SJaegeuk Kim 		__mark_sit_entry_dirty(sbi, segno);
1236351df4b2SJaegeuk Kim }
1237351df4b2SJaegeuk Kim 
1238351df4b2SJaegeuk Kim static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
1239351df4b2SJaegeuk Kim {
1240351df4b2SJaegeuk Kim 	struct seg_entry *se;
1241351df4b2SJaegeuk Kim 	unsigned int segno, offset;
1242351df4b2SJaegeuk Kim 	long int new_vblocks;
1243351df4b2SJaegeuk Kim 
1244351df4b2SJaegeuk Kim 	segno = GET_SEGNO(sbi, blkaddr);
1245351df4b2SJaegeuk Kim 
1246351df4b2SJaegeuk Kim 	se = get_seg_entry(sbi, segno);
1247351df4b2SJaegeuk Kim 	new_vblocks = se->valid_blocks + del;
1248491c0854SJaegeuk Kim 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
1249351df4b2SJaegeuk Kim 
12509850cf4aSJaegeuk Kim 	f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
1251351df4b2SJaegeuk Kim 				(new_vblocks > sbi->blocks_per_seg)));
1252351df4b2SJaegeuk Kim 
1253351df4b2SJaegeuk Kim 	se->valid_blocks = new_vblocks;
1254351df4b2SJaegeuk Kim 	se->mtime = get_mtime(sbi);
1255351df4b2SJaegeuk Kim 	SIT_I(sbi)->max_mtime = se->mtime;
1256351df4b2SJaegeuk Kim 
1257351df4b2SJaegeuk Kim 	/* Update valid block bitmap */
1258351df4b2SJaegeuk Kim 	if (del > 0) {
1259355e7891SChao Yu 		if (f2fs_test_and_set_bit(offset, se->cur_valid_map)) {
1260355e7891SChao Yu #ifdef CONFIG_F2FS_CHECK_FS
1261355e7891SChao Yu 			if (f2fs_test_and_set_bit(offset,
1262355e7891SChao Yu 						se->cur_valid_map_mir))
126305796763SJaegeuk Kim 				f2fs_bug_on(sbi, 1);
1264355e7891SChao Yu 			else
1265355e7891SChao Yu 				WARN_ON(1);
1266355e7891SChao Yu #else
1267355e7891SChao Yu 			f2fs_bug_on(sbi, 1);
1268355e7891SChao Yu #endif
1269355e7891SChao Yu 		}
12703e025740SJaegeuk Kim 		if (f2fs_discard_en(sbi) &&
12713e025740SJaegeuk Kim 			!f2fs_test_and_set_bit(offset, se->discard_map))
1272a66cdd98SJaegeuk Kim 			sbi->discard_blks--;
1273720037f9SJaegeuk Kim 
1274720037f9SJaegeuk Kim 		/* don't overwrite by SSR to keep node chain */
1275720037f9SJaegeuk Kim 		if (se->type == CURSEG_WARM_NODE) {
1276720037f9SJaegeuk Kim 			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
1277720037f9SJaegeuk Kim 				se->ckpt_valid_blocks++;
1278720037f9SJaegeuk Kim 		}
1279351df4b2SJaegeuk Kim 	} else {
1280355e7891SChao Yu 		if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) {
1281355e7891SChao Yu #ifdef CONFIG_F2FS_CHECK_FS
1282355e7891SChao Yu 			if (!f2fs_test_and_clear_bit(offset,
1283355e7891SChao Yu 						se->cur_valid_map_mir))
128405796763SJaegeuk Kim 				f2fs_bug_on(sbi, 1);
1285355e7891SChao Yu 			else
1286355e7891SChao Yu 				WARN_ON(1);
1287355e7891SChao Yu #else
1288355e7891SChao Yu 			f2fs_bug_on(sbi, 1);
1289355e7891SChao Yu #endif
1290355e7891SChao Yu 		}
12913e025740SJaegeuk Kim 		if (f2fs_discard_en(sbi) &&
12923e025740SJaegeuk Kim 			f2fs_test_and_clear_bit(offset, se->discard_map))
1293a66cdd98SJaegeuk Kim 			sbi->discard_blks++;
1294351df4b2SJaegeuk Kim 	}
1295351df4b2SJaegeuk Kim 	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
1296351df4b2SJaegeuk Kim 		se->ckpt_valid_blocks += del;
1297351df4b2SJaegeuk Kim 
1298351df4b2SJaegeuk Kim 	__mark_sit_entry_dirty(sbi, segno);
1299351df4b2SJaegeuk Kim 
1300351df4b2SJaegeuk Kim 	/* update total number of valid blocks to be written in ckpt area */
1301351df4b2SJaegeuk Kim 	SIT_I(sbi)->written_valid_blocks += del;
1302351df4b2SJaegeuk Kim 
1303351df4b2SJaegeuk Kim 	if (sbi->segs_per_sec > 1)
1304351df4b2SJaegeuk Kim 		get_sec_entry(sbi, segno)->valid_blocks += del;
1305351df4b2SJaegeuk Kim }
1306351df4b2SJaegeuk Kim 
13075e443818SJaegeuk Kim void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
1308351df4b2SJaegeuk Kim {
13095e443818SJaegeuk Kim 	update_sit_entry(sbi, new, 1);
13105e443818SJaegeuk Kim 	if (GET_SEGNO(sbi, old) != NULL_SEGNO)
13115e443818SJaegeuk Kim 		update_sit_entry(sbi, old, -1);
13125e443818SJaegeuk Kim 
13135e443818SJaegeuk Kim 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
13145e443818SJaegeuk Kim 	locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
1315351df4b2SJaegeuk Kim }
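
/*
 * Illustrative call, mirroring the use in allocate_data_block() below when a
 * block moves from old_blkaddr to *new_blkaddr (del = +1 on the new block,
 * del = -1 on the old one unless its segment is NULL_SEGNO):
 *
 *	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
 *
 * Both affected segments are then re-evaluated for the dirty seglist so the
 * SIT changes reach the next checkpoint.
 */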
1316351df4b2SJaegeuk Kim 
1317351df4b2SJaegeuk Kim void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
1318351df4b2SJaegeuk Kim {
1319351df4b2SJaegeuk Kim 	unsigned int segno = GET_SEGNO(sbi, addr);
1320351df4b2SJaegeuk Kim 	struct sit_info *sit_i = SIT_I(sbi);
1321351df4b2SJaegeuk Kim 
13229850cf4aSJaegeuk Kim 	f2fs_bug_on(sbi, addr == NULL_ADDR);
1323351df4b2SJaegeuk Kim 	if (addr == NEW_ADDR)
1324351df4b2SJaegeuk Kim 		return;
1325351df4b2SJaegeuk Kim 
1326351df4b2SJaegeuk Kim 	/* add it into sit main buffer */
1327351df4b2SJaegeuk Kim 	mutex_lock(&sit_i->sentry_lock);
1328351df4b2SJaegeuk Kim 
1329351df4b2SJaegeuk Kim 	update_sit_entry(sbi, addr, -1);
1330351df4b2SJaegeuk Kim 
1331351df4b2SJaegeuk Kim 	/* add it into dirty seglist */
1332351df4b2SJaegeuk Kim 	locate_dirty_segment(sbi, segno);
1333351df4b2SJaegeuk Kim 
1334351df4b2SJaegeuk Kim 	mutex_unlock(&sit_i->sentry_lock);
1335351df4b2SJaegeuk Kim }
1336351df4b2SJaegeuk Kim 
13376e2c64adSJaegeuk Kim bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
13386e2c64adSJaegeuk Kim {
13396e2c64adSJaegeuk Kim 	struct sit_info *sit_i = SIT_I(sbi);
13406e2c64adSJaegeuk Kim 	unsigned int segno, offset;
13416e2c64adSJaegeuk Kim 	struct seg_entry *se;
13426e2c64adSJaegeuk Kim 	bool is_cp = false;
13436e2c64adSJaegeuk Kim 
13446e2c64adSJaegeuk Kim 	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
13456e2c64adSJaegeuk Kim 		return true;
13466e2c64adSJaegeuk Kim 
13476e2c64adSJaegeuk Kim 	mutex_lock(&sit_i->sentry_lock);
13486e2c64adSJaegeuk Kim 
13496e2c64adSJaegeuk Kim 	segno = GET_SEGNO(sbi, blkaddr);
13506e2c64adSJaegeuk Kim 	se = get_seg_entry(sbi, segno);
13516e2c64adSJaegeuk Kim 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
13526e2c64adSJaegeuk Kim 
13536e2c64adSJaegeuk Kim 	if (f2fs_test_bit(offset, se->ckpt_valid_map))
13546e2c64adSJaegeuk Kim 		is_cp = true;
13556e2c64adSJaegeuk Kim 
13566e2c64adSJaegeuk Kim 	mutex_unlock(&sit_i->sentry_lock);
13576e2c64adSJaegeuk Kim 
13586e2c64adSJaegeuk Kim 	return is_cp;
13596e2c64adSJaegeuk Kim }
13606e2c64adSJaegeuk Kim 
13610a8165d7SJaegeuk Kim /*
1362351df4b2SJaegeuk Kim  * This function should be called with the curseg_mutex held
1363351df4b2SJaegeuk Kim  */
1364351df4b2SJaegeuk Kim static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
1365e79efe3bSHaicheng Li 					struct f2fs_summary *sum)
1366351df4b2SJaegeuk Kim {
1367351df4b2SJaegeuk Kim 	struct curseg_info *curseg = CURSEG_I(sbi, type);
1368351df4b2SJaegeuk Kim 	void *addr = curseg->sum_blk;
1369e79efe3bSHaicheng Li 	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
1370351df4b2SJaegeuk Kim 	memcpy(addr, sum, sizeof(struct f2fs_summary));
1371351df4b2SJaegeuk Kim }
1372351df4b2SJaegeuk Kim 
13730a8165d7SJaegeuk Kim /*
1374351df4b2SJaegeuk Kim  * Calculate the number of current summary pages for writing
1375351df4b2SJaegeuk Kim  */
13763fa06d7bSChao Yu int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
1377351df4b2SJaegeuk Kim {
1378351df4b2SJaegeuk Kim 	int valid_sum_count = 0;
13799a47938bSFan Li 	int i, sum_in_page;
1380351df4b2SJaegeuk Kim 
1381351df4b2SJaegeuk Kim 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1382351df4b2SJaegeuk Kim 		if (sbi->ckpt->alloc_type[i] == SSR)
1383351df4b2SJaegeuk Kim 			valid_sum_count += sbi->blocks_per_seg;
13843fa06d7bSChao Yu 		else {
13853fa06d7bSChao Yu 			if (for_ra)
13863fa06d7bSChao Yu 				valid_sum_count += le16_to_cpu(
13873fa06d7bSChao Yu 					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
1388351df4b2SJaegeuk Kim 			else
1389351df4b2SJaegeuk Kim 				valid_sum_count += curseg_blkoff(sbi, i);
1390351df4b2SJaegeuk Kim 		}
13913fa06d7bSChao Yu 	}
1392351df4b2SJaegeuk Kim 
139309cbfeafSKirill A. Shutemov 	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
13949a47938bSFan Li 			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
13959a47938bSFan Li 	if (valid_sum_count <= sum_in_page)
1396351df4b2SJaegeuk Kim 		return 1;
13979a47938bSFan Li 	else if ((valid_sum_count - sum_in_page) <=
139809cbfeafSKirill A. Shutemov 		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
1399351df4b2SJaegeuk Kim 		return 2;
1400351df4b2SJaegeuk Kim 	return 3;
1401351df4b2SJaegeuk Kim }
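
/*
 * Worked example, assuming 4KB pages and the usual on-disk sizes
 * (SUMMARY_SIZE = 7, SUM_FOOTER_SIZE = 5, SUM_JOURNAL_SIZE = 253; treat these
 * figures as illustrative):
 *
 *	sum_in_page = (4096 - 2 * 253 - 5) / 7 = 512
 *
 * so up to 512 data summaries fit in the first compacted page together with
 * both journals, a second page without journals holds another
 * (4096 - 5) / 7 = 584 entries, and anything beyond that needs three pages.
 */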
1402351df4b2SJaegeuk Kim 
14030a8165d7SJaegeuk Kim /*
1404351df4b2SJaegeuk Kim  * Caller should put this summary page
1405351df4b2SJaegeuk Kim  */
1406351df4b2SJaegeuk Kim struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
1407351df4b2SJaegeuk Kim {
1408351df4b2SJaegeuk Kim 	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
1409351df4b2SJaegeuk Kim }
1410351df4b2SJaegeuk Kim 
1411381722d2SChao Yu void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
1412381722d2SChao Yu {
1413381722d2SChao Yu 	struct page *page = grab_meta_page(sbi, blk_addr);
1414381722d2SChao Yu 	void *dst = page_address(page);
1415381722d2SChao Yu 
1416381722d2SChao Yu 	if (src)
141709cbfeafSKirill A. Shutemov 		memcpy(dst, src, PAGE_SIZE);
1418381722d2SChao Yu 	else
141909cbfeafSKirill A. Shutemov 		memset(dst, 0, PAGE_SIZE);
1420381722d2SChao Yu 	set_page_dirty(page);
1421381722d2SChao Yu 	f2fs_put_page(page, 1);
1422381722d2SChao Yu }
1423381722d2SChao Yu 
1424351df4b2SJaegeuk Kim static void write_sum_page(struct f2fs_sb_info *sbi,
1425351df4b2SJaegeuk Kim 			struct f2fs_summary_block *sum_blk, block_t blk_addr)
1426351df4b2SJaegeuk Kim {
1427381722d2SChao Yu 	update_meta_page(sbi, (void *)sum_blk, blk_addr);
1428351df4b2SJaegeuk Kim }
1429351df4b2SJaegeuk Kim 
1430b7ad7512SChao Yu static void write_current_sum_page(struct f2fs_sb_info *sbi,
1431b7ad7512SChao Yu 						int type, block_t blk_addr)
1432b7ad7512SChao Yu {
1433b7ad7512SChao Yu 	struct curseg_info *curseg = CURSEG_I(sbi, type);
1434b7ad7512SChao Yu 	struct page *page = grab_meta_page(sbi, blk_addr);
1435b7ad7512SChao Yu 	struct f2fs_summary_block *src = curseg->sum_blk;
1436b7ad7512SChao Yu 	struct f2fs_summary_block *dst;
1437b7ad7512SChao Yu 
1438b7ad7512SChao Yu 	dst = (struct f2fs_summary_block *)page_address(page);
1439b7ad7512SChao Yu 
1440b7ad7512SChao Yu 	mutex_lock(&curseg->curseg_mutex);
1441b7ad7512SChao Yu 
1442b7ad7512SChao Yu 	down_read(&curseg->journal_rwsem);
1443b7ad7512SChao Yu 	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
1444b7ad7512SChao Yu 	up_read(&curseg->journal_rwsem);
1445b7ad7512SChao Yu 
1446b7ad7512SChao Yu 	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
1447b7ad7512SChao Yu 	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
1448b7ad7512SChao Yu 
1449b7ad7512SChao Yu 	mutex_unlock(&curseg->curseg_mutex);
1450b7ad7512SChao Yu 
1451b7ad7512SChao Yu 	set_page_dirty(page);
1452b7ad7512SChao Yu 	f2fs_put_page(page, 1);
1453b7ad7512SChao Yu }
1454b7ad7512SChao Yu 
14550a8165d7SJaegeuk Kim /*
1456351df4b2SJaegeuk Kim  * Find a new segment from the free segments bitmap in the right order.
1457351df4b2SJaegeuk Kim  * This function must always succeed; otherwise it is a BUG.
1458351df4b2SJaegeuk Kim  */
1459351df4b2SJaegeuk Kim static void get_new_segment(struct f2fs_sb_info *sbi,
1460351df4b2SJaegeuk Kim 			unsigned int *newseg, bool new_sec, int dir)
1461351df4b2SJaegeuk Kim {
1462351df4b2SJaegeuk Kim 	struct free_segmap_info *free_i = FREE_I(sbi);
1463351df4b2SJaegeuk Kim 	unsigned int segno, secno, zoneno;
14647cd8558bSJaegeuk Kim 	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
1465*4ddb1a4dSJaegeuk Kim 	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
1466*4ddb1a4dSJaegeuk Kim 	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
1467351df4b2SJaegeuk Kim 	unsigned int left_start = hint;
1468351df4b2SJaegeuk Kim 	bool init = true;
1469351df4b2SJaegeuk Kim 	int go_left = 0;
1470351df4b2SJaegeuk Kim 	int i;
1471351df4b2SJaegeuk Kim 
14721a118ccfSChao Yu 	spin_lock(&free_i->segmap_lock);
1473351df4b2SJaegeuk Kim 
1474351df4b2SJaegeuk Kim 	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
1475351df4b2SJaegeuk Kim 		segno = find_next_zero_bit(free_i->free_segmap,
1476*4ddb1a4dSJaegeuk Kim 			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
1477*4ddb1a4dSJaegeuk Kim 		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
1478351df4b2SJaegeuk Kim 			goto got_it;
1479351df4b2SJaegeuk Kim 	}
1480351df4b2SJaegeuk Kim find_other_zone:
14817cd8558bSJaegeuk Kim 	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
14827cd8558bSJaegeuk Kim 	if (secno >= MAIN_SECS(sbi)) {
1483351df4b2SJaegeuk Kim 		if (dir == ALLOC_RIGHT) {
1484351df4b2SJaegeuk Kim 			secno = find_next_zero_bit(free_i->free_secmap,
14857cd8558bSJaegeuk Kim 							MAIN_SECS(sbi), 0);
14867cd8558bSJaegeuk Kim 			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
1487351df4b2SJaegeuk Kim 		} else {
1488351df4b2SJaegeuk Kim 			go_left = 1;
1489351df4b2SJaegeuk Kim 			left_start = hint - 1;
1490351df4b2SJaegeuk Kim 		}
1491351df4b2SJaegeuk Kim 	}
1492351df4b2SJaegeuk Kim 	if (go_left == 0)
1493351df4b2SJaegeuk Kim 		goto skip_left;
1494351df4b2SJaegeuk Kim 
1495351df4b2SJaegeuk Kim 	while (test_bit(left_start, free_i->free_secmap)) {
1496351df4b2SJaegeuk Kim 		if (left_start > 0) {
1497351df4b2SJaegeuk Kim 			left_start--;
1498351df4b2SJaegeuk Kim 			continue;
1499351df4b2SJaegeuk Kim 		}
1500351df4b2SJaegeuk Kim 		left_start = find_next_zero_bit(free_i->free_secmap,
15017cd8558bSJaegeuk Kim 							MAIN_SECS(sbi), 0);
15027cd8558bSJaegeuk Kim 		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
1503351df4b2SJaegeuk Kim 		break;
1504351df4b2SJaegeuk Kim 	}
1505351df4b2SJaegeuk Kim 	secno = left_start;
1506351df4b2SJaegeuk Kim skip_left:
1507351df4b2SJaegeuk Kim 	hint = secno;
1508*4ddb1a4dSJaegeuk Kim 	segno = GET_SEG_FROM_SEC(sbi, secno);
1509*4ddb1a4dSJaegeuk Kim 	zoneno = GET_ZONE_FROM_SEC(sbi, secno);
1510351df4b2SJaegeuk Kim 
1511351df4b2SJaegeuk Kim 	/* give up on finding another zone */
1512351df4b2SJaegeuk Kim 	if (!init)
1513351df4b2SJaegeuk Kim 		goto got_it;
1514351df4b2SJaegeuk Kim 	if (sbi->secs_per_zone == 1)
1515351df4b2SJaegeuk Kim 		goto got_it;
1516351df4b2SJaegeuk Kim 	if (zoneno == old_zoneno)
1517351df4b2SJaegeuk Kim 		goto got_it;
1518351df4b2SJaegeuk Kim 	if (dir == ALLOC_LEFT) {
1519351df4b2SJaegeuk Kim 		if (!go_left && zoneno + 1 >= total_zones)
1520351df4b2SJaegeuk Kim 			goto got_it;
1521351df4b2SJaegeuk Kim 		if (go_left && zoneno == 0)
1522351df4b2SJaegeuk Kim 			goto got_it;
1523351df4b2SJaegeuk Kim 	}
1524351df4b2SJaegeuk Kim 	for (i = 0; i < NR_CURSEG_TYPE; i++)
1525351df4b2SJaegeuk Kim 		if (CURSEG_I(sbi, i)->zone == zoneno)
1526351df4b2SJaegeuk Kim 			break;
1527351df4b2SJaegeuk Kim 
1528351df4b2SJaegeuk Kim 	if (i < NR_CURSEG_TYPE) {
1529351df4b2SJaegeuk Kim 		/* zone is in use, try another */
1530351df4b2SJaegeuk Kim 		if (go_left)
1531351df4b2SJaegeuk Kim 			hint = zoneno * sbi->secs_per_zone - 1;
1532351df4b2SJaegeuk Kim 		else if (zoneno + 1 >= total_zones)
1533351df4b2SJaegeuk Kim 			hint = 0;
1534351df4b2SJaegeuk Kim 		else
1535351df4b2SJaegeuk Kim 			hint = (zoneno + 1) * sbi->secs_per_zone;
1536351df4b2SJaegeuk Kim 		init = false;
1537351df4b2SJaegeuk Kim 		goto find_other_zone;
1538351df4b2SJaegeuk Kim 	}
1539351df4b2SJaegeuk Kim got_it:
1540351df4b2SJaegeuk Kim 	/* set it as dirty segment in free segmap */
15419850cf4aSJaegeuk Kim 	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
1542351df4b2SJaegeuk Kim 	__set_inuse(sbi, segno);
1543351df4b2SJaegeuk Kim 	*newseg = segno;
15441a118ccfSChao Yu 	spin_unlock(&free_i->segmap_lock);
1545351df4b2SJaegeuk Kim }
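
/*
 * Concrete example of the in-section fast path above (hypothetical geometry,
 * segs_per_sec == 4): with *newseg == 9, section 2 spans segments 8..11, so
 * the first find_next_zero_bit() only inspects segments 10 and 11.  Only when
 * both are in use (or new_sec was requested) does the search fall through to
 * free_secmap, scanning to the right of the hint first and walking leftwards
 * only for ALLOC_LEFT when nothing is free on the right.
 */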
1546351df4b2SJaegeuk Kim 
1547351df4b2SJaegeuk Kim static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
1548351df4b2SJaegeuk Kim {
1549351df4b2SJaegeuk Kim 	struct curseg_info *curseg = CURSEG_I(sbi, type);
1550351df4b2SJaegeuk Kim 	struct summary_footer *sum_footer;
1551351df4b2SJaegeuk Kim 
1552351df4b2SJaegeuk Kim 	curseg->segno = curseg->next_segno;
1553*4ddb1a4dSJaegeuk Kim 	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
1554351df4b2SJaegeuk Kim 	curseg->next_blkoff = 0;
1555351df4b2SJaegeuk Kim 	curseg->next_segno = NULL_SEGNO;
1556351df4b2SJaegeuk Kim 
1557351df4b2SJaegeuk Kim 	sum_footer = &(curseg->sum_blk->footer);
1558351df4b2SJaegeuk Kim 	memset(sum_footer, 0, sizeof(struct summary_footer));
1559351df4b2SJaegeuk Kim 	if (IS_DATASEG(type))
1560351df4b2SJaegeuk Kim 		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
1561351df4b2SJaegeuk Kim 	if (IS_NODESEG(type))
1562351df4b2SJaegeuk Kim 		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
1563351df4b2SJaegeuk Kim 	__set_sit_entry_type(sbi, type, curseg->segno, modified);
1564351df4b2SJaegeuk Kim }
1565351df4b2SJaegeuk Kim 
15667a20b8a6SJaegeuk Kim static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
15677a20b8a6SJaegeuk Kim {
15687a20b8a6SJaegeuk Kim 	if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
15697a20b8a6SJaegeuk Kim 		return 0;
15707a20b8a6SJaegeuk Kim 
15717a20b8a6SJaegeuk Kim 	return CURSEG_I(sbi, type)->segno;
15727a20b8a6SJaegeuk Kim }
15737a20b8a6SJaegeuk Kim 
15740a8165d7SJaegeuk Kim /*
1575351df4b2SJaegeuk Kim  * Allocate a current working segment.
1576351df4b2SJaegeuk Kim  * This function always allocates a free segment in LFS manner.
1577351df4b2SJaegeuk Kim  */
1578351df4b2SJaegeuk Kim static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
1579351df4b2SJaegeuk Kim {
1580351df4b2SJaegeuk Kim 	struct curseg_info *curseg = CURSEG_I(sbi, type);
1581351df4b2SJaegeuk Kim 	unsigned int segno = curseg->segno;
1582351df4b2SJaegeuk Kim 	int dir = ALLOC_LEFT;
1583351df4b2SJaegeuk Kim 
1584351df4b2SJaegeuk Kim 	write_sum_page(sbi, curseg->sum_blk,
158581fb5e87SHaicheng Li 				GET_SUM_BLOCK(sbi, segno));
1586351df4b2SJaegeuk Kim 	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
1587351df4b2SJaegeuk Kim 		dir = ALLOC_RIGHT;
1588351df4b2SJaegeuk Kim 
1589351df4b2SJaegeuk Kim 	if (test_opt(sbi, NOHEAP))
1590351df4b2SJaegeuk Kim 		dir = ALLOC_RIGHT;
1591351df4b2SJaegeuk Kim 
15927a20b8a6SJaegeuk Kim 	segno = __get_next_segno(sbi, type);
1593351df4b2SJaegeuk Kim 	get_new_segment(sbi, &segno, new_sec, dir);
1594351df4b2SJaegeuk Kim 	curseg->next_segno = segno;
1595351df4b2SJaegeuk Kim 	reset_curseg(sbi, type, 1);
1596351df4b2SJaegeuk Kim 	curseg->alloc_type = LFS;
1597351df4b2SJaegeuk Kim }
1598351df4b2SJaegeuk Kim 
1599351df4b2SJaegeuk Kim static void __next_free_blkoff(struct f2fs_sb_info *sbi,
1600351df4b2SJaegeuk Kim 			struct curseg_info *seg, block_t start)
1601351df4b2SJaegeuk Kim {
1602351df4b2SJaegeuk Kim 	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
1603e81c93cfSChangman Lee 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
160460a3b782SJaegeuk Kim 	unsigned long *target_map = SIT_I(sbi)->tmp_map;
1605e81c93cfSChangman Lee 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
1606e81c93cfSChangman Lee 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
1607e81c93cfSChangman Lee 	int i, pos;
1608e81c93cfSChangman Lee 
1609e81c93cfSChangman Lee 	for (i = 0; i < entries; i++)
1610e81c93cfSChangman Lee 		target_map[i] = ckpt_map[i] | cur_map[i];
1611e81c93cfSChangman Lee 
1612e81c93cfSChangman Lee 	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
1613e81c93cfSChangman Lee 
1614e81c93cfSChangman Lee 	seg->next_blkoff = pos;
1615351df4b2SJaegeuk Kim }
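
/*
 * Small worked example (illustrative bit patterns):
 *
 *	cur_valid_map:  1 0 1 1 0 0 ...
 *	ckpt_valid_map: 1 1 0 0 0 0 ...
 *	target_map:     1 1 1 1 0 0 ...
 *
 * With start == 0, __find_rev_next_zero_bit() returns 4, i.e. the next SSR
 * write goes to the first block that is free in both the current bitmap and
 * the last checkpoint, so checkpointed data is never overwritten in place.
 */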
1616351df4b2SJaegeuk Kim 
16170a8165d7SJaegeuk Kim /*
1618351df4b2SJaegeuk Kim  * If a segment is written in LFS manner, the next block offset is simply
1619351df4b2SJaegeuk Kim  * obtained by increasing the current block offset. However, if a segment is
1620351df4b2SJaegeuk Kim  * written in SSR manner, the next block offset comes from __next_free_blkoff()
1621351df4b2SJaegeuk Kim  */
1622351df4b2SJaegeuk Kim static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
1623351df4b2SJaegeuk Kim 				struct curseg_info *seg)
1624351df4b2SJaegeuk Kim {
1625351df4b2SJaegeuk Kim 	if (seg->alloc_type == SSR)
1626351df4b2SJaegeuk Kim 		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
1627351df4b2SJaegeuk Kim 	else
1628351df4b2SJaegeuk Kim 		seg->next_blkoff++;
1629351df4b2SJaegeuk Kim }
1630351df4b2SJaegeuk Kim 
16310a8165d7SJaegeuk Kim /*
1632351df4b2SJaegeuk Kim  * This function always allocates a used segment (from the dirty seglist) in
1633351df4b2SJaegeuk Kim  * SSR manner, so it should recover that segment's existing valid-block information
1634351df4b2SJaegeuk Kim  */
1635351df4b2SJaegeuk Kim static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
1636351df4b2SJaegeuk Kim {
1637351df4b2SJaegeuk Kim 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1638351df4b2SJaegeuk Kim 	struct curseg_info *curseg = CURSEG_I(sbi, type);
1639351df4b2SJaegeuk Kim 	unsigned int new_segno = curseg->next_segno;
1640351df4b2SJaegeuk Kim 	struct f2fs_summary_block *sum_node;
1641351df4b2SJaegeuk Kim 	struct page *sum_page;
1642351df4b2SJaegeuk Kim 
1643351df4b2SJaegeuk Kim 	write_sum_page(sbi, curseg->sum_blk,
1644351df4b2SJaegeuk Kim 				GET_SUM_BLOCK(sbi, curseg->segno));
1645351df4b2SJaegeuk Kim 	__set_test_and_inuse(sbi, new_segno);
1646351df4b2SJaegeuk Kim 
1647351df4b2SJaegeuk Kim 	mutex_lock(&dirty_i->seglist_lock);
1648351df4b2SJaegeuk Kim 	__remove_dirty_segment(sbi, new_segno, PRE);
1649351df4b2SJaegeuk Kim 	__remove_dirty_segment(sbi, new_segno, DIRTY);
1650351df4b2SJaegeuk Kim 	mutex_unlock(&dirty_i->seglist_lock);
1651351df4b2SJaegeuk Kim 
1652351df4b2SJaegeuk Kim 	reset_curseg(sbi, type, 1);
1653351df4b2SJaegeuk Kim 	curseg->alloc_type = SSR;
1654351df4b2SJaegeuk Kim 	__next_free_blkoff(sbi, curseg, 0);
1655351df4b2SJaegeuk Kim 
1656351df4b2SJaegeuk Kim 	if (reuse) {
1657351df4b2SJaegeuk Kim 		sum_page = get_sum_page(sbi, new_segno);
1658351df4b2SJaegeuk Kim 		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
1659351df4b2SJaegeuk Kim 		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
1660351df4b2SJaegeuk Kim 		f2fs_put_page(sum_page, 1);
1661351df4b2SJaegeuk Kim 	}
1662351df4b2SJaegeuk Kim }
1663351df4b2SJaegeuk Kim 
166443727527SJaegeuk Kim static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
166543727527SJaegeuk Kim {
166643727527SJaegeuk Kim 	struct curseg_info *curseg = CURSEG_I(sbi, type);
166743727527SJaegeuk Kim 	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
1668d27c3d89SChao Yu 	int i, cnt;
1669d27c3d89SChao Yu 	bool reversed = false;
1670c192f7a4SJaegeuk Kim 
1671c192f7a4SJaegeuk Kim 	/* need_SSR() already forces us to do this */
1672c192f7a4SJaegeuk Kim 	if (v_ops->get_victim(sbi, &(curseg)->next_segno, BG_GC, type, SSR))
1673c192f7a4SJaegeuk Kim 		return 1;
167443727527SJaegeuk Kim 
167570d625cbSJaegeuk Kim 	/* For node segments, let's do SSR more intensively */
167670d625cbSJaegeuk Kim 	if (IS_NODESEG(type)) {
1677d27c3d89SChao Yu 		if (type >= CURSEG_WARM_NODE) {
1678d27c3d89SChao Yu 			reversed = true;
1679d27c3d89SChao Yu 			i = CURSEG_COLD_NODE;
1680d27c3d89SChao Yu 		} else {
168170d625cbSJaegeuk Kim 			i = CURSEG_HOT_NODE;
1682d27c3d89SChao Yu 		}
1683d27c3d89SChao Yu 		cnt = NR_CURSEG_NODE_TYPE;
1684d27c3d89SChao Yu 	} else {
1685d27c3d89SChao Yu 		if (type >= CURSEG_WARM_DATA) {
1686d27c3d89SChao Yu 			reversed = true;
1687d27c3d89SChao Yu 			i = CURSEG_COLD_DATA;
168870d625cbSJaegeuk Kim 		} else {
168970d625cbSJaegeuk Kim 			i = CURSEG_HOT_DATA;
1690d27c3d89SChao Yu 		}
1691d27c3d89SChao Yu 		cnt = NR_CURSEG_DATA_TYPE;
169270d625cbSJaegeuk Kim 	}
169343727527SJaegeuk Kim 
1694d27c3d89SChao Yu 	for (; cnt-- > 0; reversed ? i-- : i++) {
1695c192f7a4SJaegeuk Kim 		if (i == type)
1696c192f7a4SJaegeuk Kim 			continue;
169743727527SJaegeuk Kim 		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
1698c192f7a4SJaegeuk Kim 						BG_GC, i, SSR))
169943727527SJaegeuk Kim 			return 1;
1700c192f7a4SJaegeuk Kim 	}
170143727527SJaegeuk Kim 	return 0;
170243727527SJaegeuk Kim }
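
/*
 * Illustrative traversal order for the fallback loop above (the first
 * get_victim() call already tried the log's own type):
 *
 *	type == CURSEG_WARM_DATA: COLD_DATA -> (skip WARM) -> HOT_DATA
 *	type == CURSEG_HOT_DATA:  (skip HOT) -> WARM_DATA -> COLD_DATA
 *
 * i.e. warm and cold logs look for victims starting from the coldest log and
 * moving toward hotter ones, hot logs search from hottest to coldest, and the
 * node logs follow the same pattern over the three node types.
 */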
170343727527SJaegeuk Kim 
1704351df4b2SJaegeuk Kim /*
1705351df4b2SJaegeuk Kim  * Flush out the current segment and replace it with a new segment.
1706351df4b2SJaegeuk Kim  * This function must always succeed; otherwise it is a BUG.
1707351df4b2SJaegeuk Kim  */
1708351df4b2SJaegeuk Kim static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
1709351df4b2SJaegeuk Kim 						int type, bool force)
1710351df4b2SJaegeuk Kim {
17117b405275SGu Zheng 	if (force)
1712351df4b2SJaegeuk Kim 		new_curseg(sbi, type, true);
17135b6c6be2SJaegeuk Kim 	else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
17145b6c6be2SJaegeuk Kim 					type == CURSEG_WARM_NODE)
1715351df4b2SJaegeuk Kim 		new_curseg(sbi, type, false);
1716351df4b2SJaegeuk Kim 	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
1717351df4b2SJaegeuk Kim 		change_curseg(sbi, type, true);
1718351df4b2SJaegeuk Kim 	else
1719351df4b2SJaegeuk Kim 		new_curseg(sbi, type, false);
1720dcdfff65SJaegeuk Kim 
1721d0db7703SJaegeuk Kim 	stat_inc_seg_type(sbi, CURSEG_I(sbi, type));
1722351df4b2SJaegeuk Kim }
1723351df4b2SJaegeuk Kim 
1724351df4b2SJaegeuk Kim void allocate_new_segments(struct f2fs_sb_info *sbi)
1725351df4b2SJaegeuk Kim {
17266ae1be13SJaegeuk Kim 	struct curseg_info *curseg;
17276ae1be13SJaegeuk Kim 	unsigned int old_segno;
1728351df4b2SJaegeuk Kim 	int i;
1729351df4b2SJaegeuk Kim 
17306ae1be13SJaegeuk Kim 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
17316ae1be13SJaegeuk Kim 		curseg = CURSEG_I(sbi, i);
17326ae1be13SJaegeuk Kim 		old_segno = curseg->segno;
17336ae1be13SJaegeuk Kim 		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
17346ae1be13SJaegeuk Kim 		locate_dirty_segment(sbi, old_segno);
17356ae1be13SJaegeuk Kim 	}
1736351df4b2SJaegeuk Kim }
1737351df4b2SJaegeuk Kim 
1738351df4b2SJaegeuk Kim static const struct segment_allocation default_salloc_ops = {
1739351df4b2SJaegeuk Kim 	.allocate_segment = allocate_segment_by_default,
1740351df4b2SJaegeuk Kim };
1741351df4b2SJaegeuk Kim 
174225290fa5SJaegeuk Kim bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
174325290fa5SJaegeuk Kim {
174425290fa5SJaegeuk Kim 	__u64 trim_start = cpc->trim_start;
174525290fa5SJaegeuk Kim 	bool has_candidate = false;
174625290fa5SJaegeuk Kim 
174725290fa5SJaegeuk Kim 	mutex_lock(&SIT_I(sbi)->sentry_lock);
174825290fa5SJaegeuk Kim 	for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
174925290fa5SJaegeuk Kim 		if (add_discard_addrs(sbi, cpc, true)) {
175025290fa5SJaegeuk Kim 			has_candidate = true;
175125290fa5SJaegeuk Kim 			break;
175225290fa5SJaegeuk Kim 		}
175325290fa5SJaegeuk Kim 	}
175425290fa5SJaegeuk Kim 	mutex_unlock(&SIT_I(sbi)->sentry_lock);
175525290fa5SJaegeuk Kim 
175625290fa5SJaegeuk Kim 	cpc->trim_start = trim_start;
175725290fa5SJaegeuk Kim 	return has_candidate;
175825290fa5SJaegeuk Kim }
175925290fa5SJaegeuk Kim 
17604b2fecc8SJaegeuk Kim int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
17614b2fecc8SJaegeuk Kim {
1762f7ef9b83SJaegeuk Kim 	__u64 start = F2FS_BYTES_TO_BLK(range->start);
1763f7ef9b83SJaegeuk Kim 	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
17644b2fecc8SJaegeuk Kim 	unsigned int start_segno, end_segno;
17654b2fecc8SJaegeuk Kim 	struct cp_control cpc;
1766c34f42e2SChao Yu 	int err = 0;
17674b2fecc8SJaegeuk Kim 
1768836b5a63SJaegeuk Kim 	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
17694b2fecc8SJaegeuk Kim 		return -EINVAL;
17704b2fecc8SJaegeuk Kim 
17719bd27ae4SJan Kara 	cpc.trimmed = 0;
17727cd8558bSJaegeuk Kim 	if (end <= MAIN_BLKADDR(sbi))
17734b2fecc8SJaegeuk Kim 		goto out;
17744b2fecc8SJaegeuk Kim 
1775ed214a11SYunlei He 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
1776ed214a11SYunlei He 		f2fs_msg(sbi->sb, KERN_WARNING,
1777ed214a11SYunlei He 			"Found FS corruption, run fsck to fix.");
1778ed214a11SYunlei He 		goto out;
1779ed214a11SYunlei He 	}
1780ed214a11SYunlei He 
17814b2fecc8SJaegeuk Kim 	/* start/end segment number in main_area */
17827cd8558bSJaegeuk Kim 	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
17837cd8558bSJaegeuk Kim 	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
17847cd8558bSJaegeuk Kim 						GET_SEGNO(sbi, end);
17854b2fecc8SJaegeuk Kim 	cpc.reason = CP_DISCARD;
1786836b5a63SJaegeuk Kim 	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
17874b2fecc8SJaegeuk Kim 
17884b2fecc8SJaegeuk Kim 	/* do checkpoint to issue discard commands safely */
1789bba681cbSJaegeuk Kim 	for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
1790bba681cbSJaegeuk Kim 		cpc.trim_start = start_segno;
1791a66cdd98SJaegeuk Kim 
1792a66cdd98SJaegeuk Kim 		if (sbi->discard_blks == 0)
1793a66cdd98SJaegeuk Kim 			break;
1794a66cdd98SJaegeuk Kim 		else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
1795a66cdd98SJaegeuk Kim 			cpc.trim_end = end_segno;
1796a66cdd98SJaegeuk Kim 		else
1797a66cdd98SJaegeuk Kim 			cpc.trim_end = min_t(unsigned int,
1798a66cdd98SJaegeuk Kim 				rounddown(start_segno +
1799bba681cbSJaegeuk Kim 				BATCHED_TRIM_SEGMENTS(sbi),
1800bba681cbSJaegeuk Kim 				sbi->segs_per_sec) - 1, end_segno);
1801bba681cbSJaegeuk Kim 
1802ca4b02eeSJaegeuk Kim 		mutex_lock(&sbi->gc_mutex);
1803c34f42e2SChao Yu 		err = write_checkpoint(sbi, &cpc);
1804ca4b02eeSJaegeuk Kim 		mutex_unlock(&sbi->gc_mutex);
1805e9328353SChao Yu 		if (err)
1806e9328353SChao Yu 			break;
180774fa5f3dSChao Yu 
180874fa5f3dSChao Yu 		schedule();
1809bba681cbSJaegeuk Kim 	}
18104b2fecc8SJaegeuk Kim out:
1811f7ef9b83SJaegeuk Kim 	range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
1812c34f42e2SChao Yu 	return err;
18134b2fecc8SJaegeuk Kim }
18144b2fecc8SJaegeuk Kim 
1815351df4b2SJaegeuk Kim static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
1816351df4b2SJaegeuk Kim {
1817351df4b2SJaegeuk Kim 	struct curseg_info *curseg = CURSEG_I(sbi, type);
1818351df4b2SJaegeuk Kim 	if (curseg->next_blkoff < sbi->blocks_per_seg)
1819351df4b2SJaegeuk Kim 		return true;
1820351df4b2SJaegeuk Kim 	return false;
1821351df4b2SJaegeuk Kim }
1822351df4b2SJaegeuk Kim 
1823351df4b2SJaegeuk Kim static int __get_segment_type_2(struct page *page, enum page_type p_type)
1824351df4b2SJaegeuk Kim {
1825351df4b2SJaegeuk Kim 	if (p_type == DATA)
1826351df4b2SJaegeuk Kim 		return CURSEG_HOT_DATA;
1827351df4b2SJaegeuk Kim 	else
1828351df4b2SJaegeuk Kim 		return CURSEG_HOT_NODE;
1829351df4b2SJaegeuk Kim }
1830351df4b2SJaegeuk Kim 
1831351df4b2SJaegeuk Kim static int __get_segment_type_4(struct page *page, enum page_type p_type)
1832351df4b2SJaegeuk Kim {
1833351df4b2SJaegeuk Kim 	if (p_type == DATA) {
1834351df4b2SJaegeuk Kim 		struct inode *inode = page->mapping->host;
1835351df4b2SJaegeuk Kim 
1836351df4b2SJaegeuk Kim 		if (S_ISDIR(inode->i_mode))
1837351df4b2SJaegeuk Kim 			return CURSEG_HOT_DATA;
1838351df4b2SJaegeuk Kim 		else
1839351df4b2SJaegeuk Kim 			return CURSEG_COLD_DATA;
1840351df4b2SJaegeuk Kim 	} else {
1841a344b9fdSJaegeuk Kim 		if (IS_DNODE(page) && is_cold_node(page))
1842a344b9fdSJaegeuk Kim 			return CURSEG_WARM_NODE;
1843351df4b2SJaegeuk Kim 		else
1844351df4b2SJaegeuk Kim 			return CURSEG_COLD_NODE;
1845351df4b2SJaegeuk Kim 	}
1846351df4b2SJaegeuk Kim }
1847351df4b2SJaegeuk Kim 
1848351df4b2SJaegeuk Kim static int __get_segment_type_6(struct page *page, enum page_type p_type)
1849351df4b2SJaegeuk Kim {
1850351df4b2SJaegeuk Kim 	if (p_type == DATA) {
1851351df4b2SJaegeuk Kim 		struct inode *inode = page->mapping->host;
1852351df4b2SJaegeuk Kim 
1853ef095d19SJaegeuk Kim 		if (is_cold_data(page) || file_is_cold(inode))
1854351df4b2SJaegeuk Kim 			return CURSEG_COLD_DATA;
1855ef095d19SJaegeuk Kim 		if (is_inode_flag_set(inode, FI_HOT_DATA))
1856ef095d19SJaegeuk Kim 			return CURSEG_HOT_DATA;
1857351df4b2SJaegeuk Kim 		return CURSEG_WARM_DATA;
1858351df4b2SJaegeuk Kim 	} else {
1859351df4b2SJaegeuk Kim 		if (IS_DNODE(page))
1860351df4b2SJaegeuk Kim 			return is_cold_node(page) ? CURSEG_WARM_NODE :
1861351df4b2SJaegeuk Kim 						CURSEG_HOT_NODE;
1862351df4b2SJaegeuk Kim 		return CURSEG_COLD_NODE;
1863351df4b2SJaegeuk Kim 	}
1864351df4b2SJaegeuk Kim }
1865351df4b2SJaegeuk Kim 
1866351df4b2SJaegeuk Kim static int __get_segment_type(struct page *page, enum page_type p_type)
1867351df4b2SJaegeuk Kim {
18684081363fSJaegeuk Kim 	switch (F2FS_P_SB(page)->active_logs) {
1869351df4b2SJaegeuk Kim 	case 2:
1870351df4b2SJaegeuk Kim 		return __get_segment_type_2(page, p_type);
1871351df4b2SJaegeuk Kim 	case 4:
1872351df4b2SJaegeuk Kim 		return __get_segment_type_4(page, p_type);
1873351df4b2SJaegeuk Kim 	}
187412a67146SJaegeuk Kim 	/* NR_CURSEG_TYPE(6) logs by default */
18759850cf4aSJaegeuk Kim 	f2fs_bug_on(F2FS_P_SB(page),
18769850cf4aSJaegeuk Kim 		F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE);
187712a67146SJaegeuk Kim 	return __get_segment_type_6(page, p_type);
1878351df4b2SJaegeuk Kim }
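
/*
 * Rough summary of the default 6-log policy implemented above (data side):
 *
 *	cold data: pages already marked cold, or files carrying the cold hint
 *	hot data:  inodes flagged FI_HOT_DATA
 *	warm data: everything else
 *
 * On the node side, direct node blocks go to the hot or warm node log
 * depending on is_cold_node(), while indirect node blocks always go to the
 * cold node log.
 */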
1879351df4b2SJaegeuk Kim 
1880bfad7c2dSJaegeuk Kim void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
1881351df4b2SJaegeuk Kim 		block_t old_blkaddr, block_t *new_blkaddr,
1882bfad7c2dSJaegeuk Kim 		struct f2fs_summary *sum, int type)
1883351df4b2SJaegeuk Kim {
1884351df4b2SJaegeuk Kim 	struct sit_info *sit_i = SIT_I(sbi);
18856ae1be13SJaegeuk Kim 	struct curseg_info *curseg = CURSEG_I(sbi, type);
1886351df4b2SJaegeuk Kim 
1887351df4b2SJaegeuk Kim 	mutex_lock(&curseg->curseg_mutex);
188821cb1d99SJaegeuk Kim 	mutex_lock(&sit_i->sentry_lock);
1889351df4b2SJaegeuk Kim 
1890351df4b2SJaegeuk Kim 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
1891351df4b2SJaegeuk Kim 
18924e6a8d9bSJaegeuk Kim 	f2fs_wait_discard_bio(sbi, *new_blkaddr);
18934e6a8d9bSJaegeuk Kim 
1894351df4b2SJaegeuk Kim 	/*
1895351df4b2SJaegeuk Kim 	 * __add_sum_entry should be called with the curseg_mutex held,
1896351df4b2SJaegeuk Kim 	 * because this function updates a summary entry in the
1897351df4b2SJaegeuk Kim 	 * current summary block.
1898351df4b2SJaegeuk Kim 	 */
1899e79efe3bSHaicheng Li 	__add_sum_entry(sbi, type, sum);
1900351df4b2SJaegeuk Kim 
1901351df4b2SJaegeuk Kim 	__refresh_next_blkoff(sbi, curseg);
1902dcdfff65SJaegeuk Kim 
1903dcdfff65SJaegeuk Kim 	stat_inc_block_count(sbi, curseg);
1904351df4b2SJaegeuk Kim 
19053436c4bdSYunlong Song 	if (!__has_curseg_space(sbi, type))
19063436c4bdSYunlong Song 		sit_i->s_ops->allocate_segment(sbi, type, false);
1907c6f82fe9SJaegeuk Kim 	/*
1908c6f82fe9SJaegeuk Kim 	 * SIT information should be updated after segment allocation,
1909c6f82fe9SJaegeuk Kim 	 * since we need to keep dirty segments precisely under SSR.
1910c6f82fe9SJaegeuk Kim 	 */
1911c6f82fe9SJaegeuk Kim 	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
19123436c4bdSYunlong Song 
1913351df4b2SJaegeuk Kim 	mutex_unlock(&sit_i->sentry_lock);
1914351df4b2SJaegeuk Kim 
1915bfad7c2dSJaegeuk Kim 	if (page && IS_NODESEG(type))
1916351df4b2SJaegeuk Kim 		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
1917351df4b2SJaegeuk Kim 
1918bfad7c2dSJaegeuk Kim 	mutex_unlock(&curseg->curseg_mutex);
1919bfad7c2dSJaegeuk Kim }
1920bfad7c2dSJaegeuk Kim 
192105ca3632SJaegeuk Kim static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
1922bfad7c2dSJaegeuk Kim {
192305ca3632SJaegeuk Kim 	int type = __get_segment_type(fio->page, fio->type);
19240a595ebaSJaegeuk Kim 	int err;
1925bfad7c2dSJaegeuk Kim 
19267dfeaa32SJaegeuk Kim 	if (fio->type == NODE || fio->type == DATA)
19277dfeaa32SJaegeuk Kim 		mutex_lock(&fio->sbi->wio_mutex[fio->type]);
19280a595ebaSJaegeuk Kim reallocate:
19297a9d7548SChao Yu 	allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
19307a9d7548SChao Yu 					&fio->new_blkaddr, sum, type);
1931bfad7c2dSJaegeuk Kim 
1932351df4b2SJaegeuk Kim 	/* write out the dirty page to the bdev */
19330a595ebaSJaegeuk Kim 	err = f2fs_submit_page_mbio(fio);
19340a595ebaSJaegeuk Kim 	if (err == -EAGAIN) {
19350a595ebaSJaegeuk Kim 		fio->old_blkaddr = fio->new_blkaddr;
19360a595ebaSJaegeuk Kim 		goto reallocate;
19370a595ebaSJaegeuk Kim 	}
19387dfeaa32SJaegeuk Kim 
19397dfeaa32SJaegeuk Kim 	if (fio->type == NODE || fio->type == DATA)
19407dfeaa32SJaegeuk Kim 		mutex_unlock(&fio->sbi->wio_mutex[fio->type]);
1941351df4b2SJaegeuk Kim }
1942351df4b2SJaegeuk Kim 
1943577e3495SJaegeuk Kim void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
1944351df4b2SJaegeuk Kim {
1945458e6197SJaegeuk Kim 	struct f2fs_io_info fio = {
194605ca3632SJaegeuk Kim 		.sbi = sbi,
1947458e6197SJaegeuk Kim 		.type = META,
194804d328deSMike Christie 		.op = REQ_OP_WRITE,
194970fd7614SChristoph Hellwig 		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
19507a9d7548SChao Yu 		.old_blkaddr = page->index,
19517a9d7548SChao Yu 		.new_blkaddr = page->index,
195205ca3632SJaegeuk Kim 		.page = page,
19534375a336SJaegeuk Kim 		.encrypted_page = NULL,
1954458e6197SJaegeuk Kim 	};
1955458e6197SJaegeuk Kim 
19562b947003SChao Yu 	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
195704d328deSMike Christie 		fio.op_flags &= ~REQ_META;
19582b947003SChao Yu 
1959351df4b2SJaegeuk Kim 	set_page_writeback(page);
196005ca3632SJaegeuk Kim 	f2fs_submit_page_mbio(&fio);
1961351df4b2SJaegeuk Kim }
1962351df4b2SJaegeuk Kim 
196305ca3632SJaegeuk Kim void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
1964351df4b2SJaegeuk Kim {
1965351df4b2SJaegeuk Kim 	struct f2fs_summary sum;
196605ca3632SJaegeuk Kim 
1967351df4b2SJaegeuk Kim 	set_summary(&sum, nid, 0, 0);
196805ca3632SJaegeuk Kim 	do_write_page(&sum, fio);
1969351df4b2SJaegeuk Kim }
1970351df4b2SJaegeuk Kim 
197105ca3632SJaegeuk Kim void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
1972351df4b2SJaegeuk Kim {
197305ca3632SJaegeuk Kim 	struct f2fs_sb_info *sbi = fio->sbi;
1974351df4b2SJaegeuk Kim 	struct f2fs_summary sum;
1975351df4b2SJaegeuk Kim 	struct node_info ni;
1976351df4b2SJaegeuk Kim 
19779850cf4aSJaegeuk Kim 	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
1978351df4b2SJaegeuk Kim 	get_node_info(sbi, dn->nid, &ni);
1979351df4b2SJaegeuk Kim 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
198005ca3632SJaegeuk Kim 	do_write_page(&sum, fio);
1981f28b3434SChao Yu 	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
1982351df4b2SJaegeuk Kim }
1983351df4b2SJaegeuk Kim 
1984d1b3e72dSJaegeuk Kim int rewrite_data_page(struct f2fs_io_info *fio)
1985351df4b2SJaegeuk Kim {
19867a9d7548SChao Yu 	fio->new_blkaddr = fio->old_blkaddr;
198705ca3632SJaegeuk Kim 	stat_inc_inplace_blocks(fio->sbi);
1988d1b3e72dSJaegeuk Kim 	return f2fs_submit_page_bio(fio);
1989351df4b2SJaegeuk Kim }
1990351df4b2SJaegeuk Kim 
19914356e48eSChao Yu void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
199219f106bcSChao Yu 				block_t old_blkaddr, block_t new_blkaddr,
199328bc106bSChao Yu 				bool recover_curseg, bool recover_newaddr)
1994351df4b2SJaegeuk Kim {
1995351df4b2SJaegeuk Kim 	struct sit_info *sit_i = SIT_I(sbi);
1996351df4b2SJaegeuk Kim 	struct curseg_info *curseg;
1997351df4b2SJaegeuk Kim 	unsigned int segno, old_cursegno;
1998351df4b2SJaegeuk Kim 	struct seg_entry *se;
1999351df4b2SJaegeuk Kim 	int type;
200019f106bcSChao Yu 	unsigned short old_blkoff;
2001351df4b2SJaegeuk Kim 
2002351df4b2SJaegeuk Kim 	segno = GET_SEGNO(sbi, new_blkaddr);
2003351df4b2SJaegeuk Kim 	se = get_seg_entry(sbi, segno);
2004351df4b2SJaegeuk Kim 	type = se->type;
2005351df4b2SJaegeuk Kim 
200619f106bcSChao Yu 	if (!recover_curseg) {
200719f106bcSChao Yu 		/* for recovery flow */
2008351df4b2SJaegeuk Kim 		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
2009351df4b2SJaegeuk Kim 			if (old_blkaddr == NULL_ADDR)
2010351df4b2SJaegeuk Kim 				type = CURSEG_COLD_DATA;
2011351df4b2SJaegeuk Kim 			else
2012351df4b2SJaegeuk Kim 				type = CURSEG_WARM_DATA;
2013351df4b2SJaegeuk Kim 		}
201419f106bcSChao Yu 	} else {
201519f106bcSChao Yu 		if (!IS_CURSEG(sbi, segno))
201619f106bcSChao Yu 			type = CURSEG_WARM_DATA;
201719f106bcSChao Yu 	}
201819f106bcSChao Yu 
2019351df4b2SJaegeuk Kim 	curseg = CURSEG_I(sbi, type);
2020351df4b2SJaegeuk Kim 
2021351df4b2SJaegeuk Kim 	mutex_lock(&curseg->curseg_mutex);
2022351df4b2SJaegeuk Kim 	mutex_lock(&sit_i->sentry_lock);
2023351df4b2SJaegeuk Kim 
2024351df4b2SJaegeuk Kim 	old_cursegno = curseg->segno;
202519f106bcSChao Yu 	old_blkoff = curseg->next_blkoff;
2026351df4b2SJaegeuk Kim 
2027351df4b2SJaegeuk Kim 	/* change the current segment */
2028351df4b2SJaegeuk Kim 	if (segno != curseg->segno) {
2029351df4b2SJaegeuk Kim 		curseg->next_segno = segno;
2030351df4b2SJaegeuk Kim 		change_curseg(sbi, type, true);
2031351df4b2SJaegeuk Kim 	}
2032351df4b2SJaegeuk Kim 
2033491c0854SJaegeuk Kim 	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
2034e79efe3bSHaicheng Li 	__add_sum_entry(sbi, type, sum);
2035351df4b2SJaegeuk Kim 
203628bc106bSChao Yu 	if (!recover_curseg || recover_newaddr)
20376e2c64adSJaegeuk Kim 		update_sit_entry(sbi, new_blkaddr, 1);
20386e2c64adSJaegeuk Kim 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
20396e2c64adSJaegeuk Kim 		update_sit_entry(sbi, old_blkaddr, -1);
20406e2c64adSJaegeuk Kim 
20416e2c64adSJaegeuk Kim 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
20426e2c64adSJaegeuk Kim 	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
20436e2c64adSJaegeuk Kim 
2044351df4b2SJaegeuk Kim 	locate_dirty_segment(sbi, old_cursegno);
2045351df4b2SJaegeuk Kim 
204619f106bcSChao Yu 	if (recover_curseg) {
204719f106bcSChao Yu 		if (old_cursegno != curseg->segno) {
204819f106bcSChao Yu 			curseg->next_segno = old_cursegno;
204919f106bcSChao Yu 			change_curseg(sbi, type, true);
205019f106bcSChao Yu 		}
205119f106bcSChao Yu 		curseg->next_blkoff = old_blkoff;
205219f106bcSChao Yu 	}
205319f106bcSChao Yu 
2054351df4b2SJaegeuk Kim 	mutex_unlock(&sit_i->sentry_lock);
2055351df4b2SJaegeuk Kim 	mutex_unlock(&curseg->curseg_mutex);
2056351df4b2SJaegeuk Kim }
2057351df4b2SJaegeuk Kim 
2058528e3459SChao Yu void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
2059528e3459SChao Yu 				block_t old_addr, block_t new_addr,
206028bc106bSChao Yu 				unsigned char version, bool recover_curseg,
206128bc106bSChao Yu 				bool recover_newaddr)
2062528e3459SChao Yu {
2063528e3459SChao Yu 	struct f2fs_summary sum;
2064528e3459SChao Yu 
2065528e3459SChao Yu 	set_summary(&sum, dn->nid, dn->ofs_in_node, version);
2066528e3459SChao Yu 
206728bc106bSChao Yu 	__f2fs_replace_block(sbi, &sum, old_addr, new_addr,
206828bc106bSChao Yu 					recover_curseg, recover_newaddr);
2069528e3459SChao Yu 
2070f28b3434SChao Yu 	f2fs_update_data_blkaddr(dn, new_addr);
2071528e3459SChao Yu }
2072528e3459SChao Yu 
207393dfe2acSJaegeuk Kim void f2fs_wait_on_page_writeback(struct page *page,
2074fec1d657SJaegeuk Kim 				enum page_type type, bool ordered)
207593dfe2acSJaegeuk Kim {
207693dfe2acSJaegeuk Kim 	if (PageWriteback(page)) {
20774081363fSJaegeuk Kim 		struct f2fs_sb_info *sbi = F2FS_P_SB(page);
20784081363fSJaegeuk Kim 
2079942fd319SJaegeuk Kim 		f2fs_submit_merged_bio_cond(sbi, page->mapping->host,
2080942fd319SJaegeuk Kim 						0, page->index, type, WRITE);
2081fec1d657SJaegeuk Kim 		if (ordered)
208293dfe2acSJaegeuk Kim 			wait_on_page_writeback(page);
2083fec1d657SJaegeuk Kim 		else
2084fec1d657SJaegeuk Kim 			wait_for_stable_page(page);
208593dfe2acSJaegeuk Kim 	}
208693dfe2acSJaegeuk Kim }
208793dfe2acSJaegeuk Kim 
208808b39fbdSChao Yu void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
208908b39fbdSChao Yu 							block_t blkaddr)
209008b39fbdSChao Yu {
209108b39fbdSChao Yu 	struct page *cpage;
209208b39fbdSChao Yu 
20935d4c0af4SYunlei He 	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
209408b39fbdSChao Yu 		return;
209508b39fbdSChao Yu 
209608b39fbdSChao Yu 	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
209708b39fbdSChao Yu 	if (cpage) {
2098fec1d657SJaegeuk Kim 		f2fs_wait_on_page_writeback(cpage, DATA, true);
209908b39fbdSChao Yu 		f2fs_put_page(cpage, 1);
210008b39fbdSChao Yu 	}
210108b39fbdSChao Yu }
210208b39fbdSChao Yu 
2103351df4b2SJaegeuk Kim static int read_compacted_summaries(struct f2fs_sb_info *sbi)
2104351df4b2SJaegeuk Kim {
2105351df4b2SJaegeuk Kim 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2106351df4b2SJaegeuk Kim 	struct curseg_info *seg_i;
2107351df4b2SJaegeuk Kim 	unsigned char *kaddr;
2108351df4b2SJaegeuk Kim 	struct page *page;
2109351df4b2SJaegeuk Kim 	block_t start;
2110351df4b2SJaegeuk Kim 	int i, j, offset;
2111351df4b2SJaegeuk Kim 
2112351df4b2SJaegeuk Kim 	start = start_sum_block(sbi);
2113351df4b2SJaegeuk Kim 
2114351df4b2SJaegeuk Kim 	page = get_meta_page(sbi, start++);
2115351df4b2SJaegeuk Kim 	kaddr = (unsigned char *)page_address(page);
2116351df4b2SJaegeuk Kim 
2117351df4b2SJaegeuk Kim 	/* Step 1: restore nat cache */
2118351df4b2SJaegeuk Kim 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
2119b7ad7512SChao Yu 	memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
2120351df4b2SJaegeuk Kim 
2121351df4b2SJaegeuk Kim 	/* Step 2: restore sit cache */
2122351df4b2SJaegeuk Kim 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
2123b7ad7512SChao Yu 	memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
2124351df4b2SJaegeuk Kim 	offset = 2 * SUM_JOURNAL_SIZE;
2125351df4b2SJaegeuk Kim 
2126351df4b2SJaegeuk Kim 	/* Step 3: restore summary entries */
2127351df4b2SJaegeuk Kim 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2128351df4b2SJaegeuk Kim 		unsigned short blk_off;
2129351df4b2SJaegeuk Kim 		unsigned int segno;
2130351df4b2SJaegeuk Kim 
2131351df4b2SJaegeuk Kim 		seg_i = CURSEG_I(sbi, i);
2132351df4b2SJaegeuk Kim 		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
2133351df4b2SJaegeuk Kim 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
2134351df4b2SJaegeuk Kim 		seg_i->next_segno = segno;
2135351df4b2SJaegeuk Kim 		reset_curseg(sbi, i, 0);
2136351df4b2SJaegeuk Kim 		seg_i->alloc_type = ckpt->alloc_type[i];
2137351df4b2SJaegeuk Kim 		seg_i->next_blkoff = blk_off;
2138351df4b2SJaegeuk Kim 
2139351df4b2SJaegeuk Kim 		if (seg_i->alloc_type == SSR)
2140351df4b2SJaegeuk Kim 			blk_off = sbi->blocks_per_seg;
2141351df4b2SJaegeuk Kim 
2142351df4b2SJaegeuk Kim 		for (j = 0; j < blk_off; j++) {
2143351df4b2SJaegeuk Kim 			struct f2fs_summary *s;
2144351df4b2SJaegeuk Kim 			s = (struct f2fs_summary *)(kaddr + offset);
2145351df4b2SJaegeuk Kim 			seg_i->sum_blk->entries[j] = *s;
2146351df4b2SJaegeuk Kim 			offset += SUMMARY_SIZE;
214709cbfeafSKirill A. Shutemov 			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
2148351df4b2SJaegeuk Kim 						SUM_FOOTER_SIZE)
2149351df4b2SJaegeuk Kim 				continue;
2150351df4b2SJaegeuk Kim 
2151351df4b2SJaegeuk Kim 			f2fs_put_page(page, 1);
2152351df4b2SJaegeuk Kim 			page = NULL;
2153351df4b2SJaegeuk Kim 
2154351df4b2SJaegeuk Kim 			page = get_meta_page(sbi, start++);
2155351df4b2SJaegeuk Kim 			kaddr = (unsigned char *)page_address(page);
2156351df4b2SJaegeuk Kim 			offset = 0;
2157351df4b2SJaegeuk Kim 		}
2158351df4b2SJaegeuk Kim 	}
2159351df4b2SJaegeuk Kim 	f2fs_put_page(page, 1);
2160351df4b2SJaegeuk Kim 	return 0;
2161351df4b2SJaegeuk Kim }
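
/*
 * Layout parsed above for the first compacted page (sizes as discussed in
 * npages_for_summary_flush()):
 *
 *	[ NAT journal | SIT journal | packed data summaries ... ]
 *
 * Additional pages, when needed, contain nothing but packed summary entries
 * up to the footer area.  Only data summaries use this compacted form; node
 * summaries are always read in the normal per-log form handled below.
 */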
2162351df4b2SJaegeuk Kim 
2163351df4b2SJaegeuk Kim static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
2164351df4b2SJaegeuk Kim {
2165351df4b2SJaegeuk Kim 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2166351df4b2SJaegeuk Kim 	struct f2fs_summary_block *sum;
2167351df4b2SJaegeuk Kim 	struct curseg_info *curseg;
2168351df4b2SJaegeuk Kim 	struct page *new;
2169351df4b2SJaegeuk Kim 	unsigned short blk_off;
2170351df4b2SJaegeuk Kim 	unsigned int segno = 0;
2171351df4b2SJaegeuk Kim 	block_t blk_addr = 0;
2172351df4b2SJaegeuk Kim 
2173351df4b2SJaegeuk Kim 	/* get segment number and block addr */
2174351df4b2SJaegeuk Kim 	if (IS_DATASEG(type)) {
2175351df4b2SJaegeuk Kim 		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
2176351df4b2SJaegeuk Kim 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
2177351df4b2SJaegeuk Kim 							CURSEG_HOT_DATA]);
2178119ee914SJaegeuk Kim 		if (__exist_node_summaries(sbi))
2179351df4b2SJaegeuk Kim 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
2180351df4b2SJaegeuk Kim 		else
2181351df4b2SJaegeuk Kim 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
2182351df4b2SJaegeuk Kim 	} else {
2183351df4b2SJaegeuk Kim 		segno = le32_to_cpu(ckpt->cur_node_segno[type -
2184351df4b2SJaegeuk Kim 							CURSEG_HOT_NODE]);
2185351df4b2SJaegeuk Kim 		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
2186351df4b2SJaegeuk Kim 							CURSEG_HOT_NODE]);
2187119ee914SJaegeuk Kim 		if (__exist_node_summaries(sbi))
2188351df4b2SJaegeuk Kim 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
2189351df4b2SJaegeuk Kim 							type - CURSEG_HOT_NODE);
2190351df4b2SJaegeuk Kim 		else
2191351df4b2SJaegeuk Kim 			blk_addr = GET_SUM_BLOCK(sbi, segno);
2192351df4b2SJaegeuk Kim 	}
2193351df4b2SJaegeuk Kim 
2194351df4b2SJaegeuk Kim 	new = get_meta_page(sbi, blk_addr);
2195351df4b2SJaegeuk Kim 	sum = (struct f2fs_summary_block *)page_address(new);
2196351df4b2SJaegeuk Kim 
2197351df4b2SJaegeuk Kim 	if (IS_NODESEG(type)) {
2198119ee914SJaegeuk Kim 		if (__exist_node_summaries(sbi)) {
2199351df4b2SJaegeuk Kim 			struct f2fs_summary *ns = &sum->entries[0];
2200351df4b2SJaegeuk Kim 			int i;
2201351df4b2SJaegeuk Kim 			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
2202351df4b2SJaegeuk Kim 				ns->version = 0;
2203351df4b2SJaegeuk Kim 				ns->ofs_in_node = 0;
2204351df4b2SJaegeuk Kim 			}
2205351df4b2SJaegeuk Kim 		} else {
2206d653788aSGu Zheng 			int err;
2207d653788aSGu Zheng 
2208d653788aSGu Zheng 			err = restore_node_summary(sbi, segno, sum);
2209d653788aSGu Zheng 			if (err) {
2210351df4b2SJaegeuk Kim 				f2fs_put_page(new, 1);
2211d653788aSGu Zheng 				return err;
2212351df4b2SJaegeuk Kim 			}
2213351df4b2SJaegeuk Kim 		}
2214351df4b2SJaegeuk Kim 	}
2215351df4b2SJaegeuk Kim 
2216351df4b2SJaegeuk Kim 	/* set the incomplete segment as curseg */
2217351df4b2SJaegeuk Kim 	curseg = CURSEG_I(sbi, type);
2218351df4b2SJaegeuk Kim 	mutex_lock(&curseg->curseg_mutex);
2219b7ad7512SChao Yu 
2220b7ad7512SChao Yu 	/* update journal info */
2221b7ad7512SChao Yu 	down_write(&curseg->journal_rwsem);
2222b7ad7512SChao Yu 	memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
2223b7ad7512SChao Yu 	up_write(&curseg->journal_rwsem);
2224b7ad7512SChao Yu 
2225b7ad7512SChao Yu 	memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
2226b7ad7512SChao Yu 	memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
2227351df4b2SJaegeuk Kim 	curseg->next_segno = segno;
2228351df4b2SJaegeuk Kim 	reset_curseg(sbi, type, 0);
2229351df4b2SJaegeuk Kim 	curseg->alloc_type = ckpt->alloc_type[type];
2230351df4b2SJaegeuk Kim 	curseg->next_blkoff = blk_off;
2231351df4b2SJaegeuk Kim 	mutex_unlock(&curseg->curseg_mutex);
2232351df4b2SJaegeuk Kim 	f2fs_put_page(new, 1);
2233351df4b2SJaegeuk Kim 	return 0;
2234351df4b2SJaegeuk Kim }
2235351df4b2SJaegeuk Kim 
2236351df4b2SJaegeuk Kim static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
2237351df4b2SJaegeuk Kim {
2238351df4b2SJaegeuk Kim 	int type = CURSEG_HOT_DATA;
2239e4fc5fbfSChao Yu 	int err;
2240351df4b2SJaegeuk Kim 
2241aaec2b1dSChao Yu 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
22423fa06d7bSChao Yu 		int npages = npages_for_summary_flush(sbi, true);
22433fa06d7bSChao Yu 
22443fa06d7bSChao Yu 		if (npages >= 2)
22453fa06d7bSChao Yu 			ra_meta_pages(sbi, start_sum_block(sbi), npages,
224626879fb1SChao Yu 							META_CP, true);
22473fa06d7bSChao Yu 
2248351df4b2SJaegeuk Kim 		/* restore for compacted data summary */
2249351df4b2SJaegeuk Kim 		if (read_compacted_summaries(sbi))
2250351df4b2SJaegeuk Kim 			return -EINVAL;
2251351df4b2SJaegeuk Kim 		type = CURSEG_HOT_NODE;
2252351df4b2SJaegeuk Kim 	}
2253351df4b2SJaegeuk Kim 
2254119ee914SJaegeuk Kim 	if (__exist_node_summaries(sbi))
22553fa06d7bSChao Yu 		ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
225626879fb1SChao Yu 					NR_CURSEG_TYPE - type, META_CP, true);
22573fa06d7bSChao Yu 
2258e4fc5fbfSChao Yu 	for (; type <= CURSEG_COLD_NODE; type++) {
2259e4fc5fbfSChao Yu 		err = read_normal_summaries(sbi, type);
2260e4fc5fbfSChao Yu 		if (err)
2261e4fc5fbfSChao Yu 			return err;
2262e4fc5fbfSChao Yu 	}
2263e4fc5fbfSChao Yu 
2264351df4b2SJaegeuk Kim 	return 0;
2265351df4b2SJaegeuk Kim }
2266351df4b2SJaegeuk Kim 
2267351df4b2SJaegeuk Kim static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
2268351df4b2SJaegeuk Kim {
2269351df4b2SJaegeuk Kim 	struct page *page;
2270351df4b2SJaegeuk Kim 	unsigned char *kaddr;
2271351df4b2SJaegeuk Kim 	struct f2fs_summary *summary;
2272351df4b2SJaegeuk Kim 	struct curseg_info *seg_i;
2273351df4b2SJaegeuk Kim 	int written_size = 0;
2274351df4b2SJaegeuk Kim 	int i, j;
2275351df4b2SJaegeuk Kim 
2276351df4b2SJaegeuk Kim 	page = grab_meta_page(sbi, blkaddr++);
2277351df4b2SJaegeuk Kim 	kaddr = (unsigned char *)page_address(page);
2278351df4b2SJaegeuk Kim 
2279351df4b2SJaegeuk Kim 	/* Step 1: write nat cache */
2280351df4b2SJaegeuk Kim 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
2281b7ad7512SChao Yu 	memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
2282351df4b2SJaegeuk Kim 	written_size += SUM_JOURNAL_SIZE;
2283351df4b2SJaegeuk Kim 
2284351df4b2SJaegeuk Kim 	/* Step 2: write sit cache */
2285351df4b2SJaegeuk Kim 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
2286b7ad7512SChao Yu 	memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
2287351df4b2SJaegeuk Kim 	written_size += SUM_JOURNAL_SIZE;
2288351df4b2SJaegeuk Kim 
2289351df4b2SJaegeuk Kim 	/* Step 3: write summary entries */
2290351df4b2SJaegeuk Kim 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2291351df4b2SJaegeuk Kim 		unsigned short blkoff;
2292351df4b2SJaegeuk Kim 		seg_i = CURSEG_I(sbi, i);
2293351df4b2SJaegeuk Kim 		if (sbi->ckpt->alloc_type[i] == SSR)
2294351df4b2SJaegeuk Kim 			blkoff = sbi->blocks_per_seg;
2295351df4b2SJaegeuk Kim 		else
2296351df4b2SJaegeuk Kim 			blkoff = curseg_blkoff(sbi, i);
2297351df4b2SJaegeuk Kim 
2298351df4b2SJaegeuk Kim 		for (j = 0; j < blkoff; j++) {
2299351df4b2SJaegeuk Kim 			if (!page) {
2300351df4b2SJaegeuk Kim 				page = grab_meta_page(sbi, blkaddr++);
2301351df4b2SJaegeuk Kim 				kaddr = (unsigned char *)page_address(page);
2302351df4b2SJaegeuk Kim 				written_size = 0;
2303351df4b2SJaegeuk Kim 			}
2304351df4b2SJaegeuk Kim 			summary = (struct f2fs_summary *)(kaddr + written_size);
2305351df4b2SJaegeuk Kim 			*summary = seg_i->sum_blk->entries[j];
2306351df4b2SJaegeuk Kim 			written_size += SUMMARY_SIZE;
2307351df4b2SJaegeuk Kim 
230809cbfeafSKirill A. Shutemov 			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
2309351df4b2SJaegeuk Kim 							SUM_FOOTER_SIZE)
2310351df4b2SJaegeuk Kim 				continue;
2311351df4b2SJaegeuk Kim 
2312e8d61a74SChao Yu 			set_page_dirty(page);
2313351df4b2SJaegeuk Kim 			f2fs_put_page(page, 1);
2314351df4b2SJaegeuk Kim 			page = NULL;
2315351df4b2SJaegeuk Kim 		}
2316351df4b2SJaegeuk Kim 	}
2317e8d61a74SChao Yu 	if (page) {
2318e8d61a74SChao Yu 		set_page_dirty(page);
2319351df4b2SJaegeuk Kim 		f2fs_put_page(page, 1);
2320351df4b2SJaegeuk Kim 	}
2321e8d61a74SChao Yu }
2322351df4b2SJaegeuk Kim 
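/*
 * Write one summary page per curseg of the given group (data or node) to
 * consecutive blocks starting at blkaddr.
 */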
2323351df4b2SJaegeuk Kim static void write_normal_summaries(struct f2fs_sb_info *sbi,
2324351df4b2SJaegeuk Kim 					block_t blkaddr, int type)
2325351df4b2SJaegeuk Kim {
2326351df4b2SJaegeuk Kim 	int i, end;
2327351df4b2SJaegeuk Kim 	if (IS_DATASEG(type))
2328351df4b2SJaegeuk Kim 		end = type + NR_CURSEG_DATA_TYPE;
2329351df4b2SJaegeuk Kim 	else
2330351df4b2SJaegeuk Kim 		end = type + NR_CURSEG_NODE_TYPE;
2331351df4b2SJaegeuk Kim 
2332b7ad7512SChao Yu 	for (i = type; i < end; i++)
2333b7ad7512SChao Yu 		write_current_sum_page(sbi, i, blkaddr + (i - type));
2334351df4b2SJaegeuk Kim }
2335351df4b2SJaegeuk Kim 
2336351df4b2SJaegeuk Kim void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
2337351df4b2SJaegeuk Kim {
2338aaec2b1dSChao Yu 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
2339351df4b2SJaegeuk Kim 		write_compacted_summaries(sbi, start_blk);
2340351df4b2SJaegeuk Kim 	else
2341351df4b2SJaegeuk Kim 		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
2342351df4b2SJaegeuk Kim }
2343351df4b2SJaegeuk Kim 
2344351df4b2SJaegeuk Kim void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
2345351df4b2SJaegeuk Kim {
2346351df4b2SJaegeuk Kim 	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
2347351df4b2SJaegeuk Kim }
2348351df4b2SJaegeuk Kim 
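/*
 * Look up val (a nid or segno) in the in-memory NAT/SIT journal and return
 * its index.  If it is absent and alloc is set, reserve a new slot when the
 * journal still has space; otherwise return -1.
 */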
2349dfc08a12SChao Yu int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
2350351df4b2SJaegeuk Kim 					unsigned int val, int alloc)
2351351df4b2SJaegeuk Kim {
2352351df4b2SJaegeuk Kim 	int i;
2353351df4b2SJaegeuk Kim 
2354351df4b2SJaegeuk Kim 	if (type == NAT_JOURNAL) {
2355dfc08a12SChao Yu 		for (i = 0; i < nats_in_cursum(journal); i++) {
2356dfc08a12SChao Yu 			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
2357351df4b2SJaegeuk Kim 				return i;
2358351df4b2SJaegeuk Kim 		}
2359dfc08a12SChao Yu 		if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
2360dfc08a12SChao Yu 			return update_nats_in_cursum(journal, 1);
2361351df4b2SJaegeuk Kim 	} else if (type == SIT_JOURNAL) {
2362dfc08a12SChao Yu 		for (i = 0; i < sits_in_cursum(journal); i++)
2363dfc08a12SChao Yu 			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
2364351df4b2SJaegeuk Kim 				return i;
2365dfc08a12SChao Yu 		if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
2366dfc08a12SChao Yu 			return update_sits_in_cursum(journal, 1);
2367351df4b2SJaegeuk Kim 	}
2368351df4b2SJaegeuk Kim 	return -1;
2369351df4b2SJaegeuk Kim }
2370351df4b2SJaegeuk Kim 
2371351df4b2SJaegeuk Kim static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
2372351df4b2SJaegeuk Kim 					unsigned int segno)
2373351df4b2SJaegeuk Kim {
23742cc22186SGu Zheng 	return get_meta_page(sbi, current_sit_addr(sbi, segno));
2375351df4b2SJaegeuk Kim }
2376351df4b2SJaegeuk Kim 
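/*
 * Copy the current SIT block covering this segment to its alternate on-disk
 * location, mark the copy dirty, and update the SIT version bitmap via
 * set_to_next_sit() so the checkpoint writes updated entries to the new block.
 */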
2377351df4b2SJaegeuk Kim static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
2378351df4b2SJaegeuk Kim 					unsigned int start)
2379351df4b2SJaegeuk Kim {
2380351df4b2SJaegeuk Kim 	struct sit_info *sit_i = SIT_I(sbi);
2381351df4b2SJaegeuk Kim 	struct page *src_page, *dst_page;
2382351df4b2SJaegeuk Kim 	pgoff_t src_off, dst_off;
2383351df4b2SJaegeuk Kim 	void *src_addr, *dst_addr;
2384351df4b2SJaegeuk Kim 
2385351df4b2SJaegeuk Kim 	src_off = current_sit_addr(sbi, start);
2386351df4b2SJaegeuk Kim 	dst_off = next_sit_addr(sbi, src_off);
2387351df4b2SJaegeuk Kim 
2388351df4b2SJaegeuk Kim 	/* get current sit block page without lock */
2389351df4b2SJaegeuk Kim 	src_page = get_meta_page(sbi, src_off);
2390351df4b2SJaegeuk Kim 	dst_page = grab_meta_page(sbi, dst_off);
23919850cf4aSJaegeuk Kim 	f2fs_bug_on(sbi, PageDirty(src_page));
2392351df4b2SJaegeuk Kim 
2393351df4b2SJaegeuk Kim 	src_addr = page_address(src_page);
2394351df4b2SJaegeuk Kim 	dst_addr = page_address(dst_page);
239509cbfeafSKirill A. Shutemov 	memcpy(dst_addr, src_addr, PAGE_SIZE);
2396351df4b2SJaegeuk Kim 
2397351df4b2SJaegeuk Kim 	set_page_dirty(dst_page);
2398351df4b2SJaegeuk Kim 	f2fs_put_page(src_page, 1);
2399351df4b2SJaegeuk Kim 
2400351df4b2SJaegeuk Kim 	set_to_next_sit(sit_i, start);
2401351df4b2SJaegeuk Kim 
2402351df4b2SJaegeuk Kim 	return dst_page;
2403351df4b2SJaegeuk Kim }
2404351df4b2SJaegeuk Kim 
2405184a5cd2SChao Yu static struct sit_entry_set *grab_sit_entry_set(void)
2406184a5cd2SChao Yu {
2407184a5cd2SChao Yu 	struct sit_entry_set *ses =
240880c54505SJaegeuk Kim 			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
2409184a5cd2SChao Yu 
2410184a5cd2SChao Yu 	ses->entry_cnt = 0;
2411184a5cd2SChao Yu 	INIT_LIST_HEAD(&ses->set_list);
2412184a5cd2SChao Yu 	return ses;
2413184a5cd2SChao Yu }
2414184a5cd2SChao Yu 
2415184a5cd2SChao Yu static void release_sit_entry_set(struct sit_entry_set *ses)
2416184a5cd2SChao Yu {
2417184a5cd2SChao Yu 	list_del(&ses->set_list);
2418184a5cd2SChao Yu 	kmem_cache_free(sit_entry_set_slab, ses);
2419184a5cd2SChao Yu }
2420184a5cd2SChao Yu 
2421184a5cd2SChao Yu static void adjust_sit_entry_set(struct sit_entry_set *ses,
2422184a5cd2SChao Yu 						struct list_head *head)
2423184a5cd2SChao Yu {
2424184a5cd2SChao Yu 	struct sit_entry_set *next = ses;
2425184a5cd2SChao Yu 
2426184a5cd2SChao Yu 	if (list_is_last(&ses->set_list, head))
2427184a5cd2SChao Yu 		return;
2428184a5cd2SChao Yu 
2429184a5cd2SChao Yu 	list_for_each_entry_continue(next, head, set_list)
2430184a5cd2SChao Yu 		if (ses->entry_cnt <= next->entry_cnt)
2431184a5cd2SChao Yu 			break;
2432184a5cd2SChao Yu 
2433184a5cd2SChao Yu 	list_move_tail(&ses->set_list, &next->set_list);
2434184a5cd2SChao Yu }
2435184a5cd2SChao Yu 
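/*
 * Account one more dirty entry against the sit_entry_set covering the SIT
 * block that contains segno (START_SEGNO), creating the set if needed and
 * keeping the list ordered by entry count.
 */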
2436184a5cd2SChao Yu static void add_sit_entry(unsigned int segno, struct list_head *head)
2437184a5cd2SChao Yu {
2438184a5cd2SChao Yu 	struct sit_entry_set *ses;
2439184a5cd2SChao Yu 	unsigned int start_segno = START_SEGNO(segno);
2440184a5cd2SChao Yu 
2441184a5cd2SChao Yu 	list_for_each_entry(ses, head, set_list) {
2442184a5cd2SChao Yu 		if (ses->start_segno == start_segno) {
2443184a5cd2SChao Yu 			ses->entry_cnt++;
2444184a5cd2SChao Yu 			adjust_sit_entry_set(ses, head);
2445184a5cd2SChao Yu 			return;
2446184a5cd2SChao Yu 		}
2447184a5cd2SChao Yu 	}
2448184a5cd2SChao Yu 
2449184a5cd2SChao Yu 	ses = grab_sit_entry_set();
2450184a5cd2SChao Yu 
2451184a5cd2SChao Yu 	ses->start_segno = start_segno;
2452184a5cd2SChao Yu 	ses->entry_cnt++;
2453184a5cd2SChao Yu 	list_add(&ses->set_list, head);
2454184a5cd2SChao Yu }
2455184a5cd2SChao Yu 
2456184a5cd2SChao Yu static void add_sits_in_set(struct f2fs_sb_info *sbi)
2457184a5cd2SChao Yu {
2458184a5cd2SChao Yu 	struct f2fs_sm_info *sm_info = SM_I(sbi);
2459184a5cd2SChao Yu 	struct list_head *set_list = &sm_info->sit_entry_set;
2460184a5cd2SChao Yu 	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
2461184a5cd2SChao Yu 	unsigned int segno;
2462184a5cd2SChao Yu 
24637cd8558bSJaegeuk Kim 	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
2464184a5cd2SChao Yu 		add_sit_entry(segno, set_list);
2465184a5cd2SChao Yu }
2466184a5cd2SChao Yu 
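/*
 * Drop every SIT entry from the cold data journal, marking each affected
 * segment dirty and accounting it in the sit entry sets if it was not
 * dirty already, so it is flushed through the SIT area instead.
 */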
2467184a5cd2SChao Yu static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
2468351df4b2SJaegeuk Kim {
2469351df4b2SJaegeuk Kim 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2470b7ad7512SChao Yu 	struct f2fs_journal *journal = curseg->journal;
2471351df4b2SJaegeuk Kim 	int i;
2472351df4b2SJaegeuk Kim 
2473b7ad7512SChao Yu 	down_write(&curseg->journal_rwsem);
2474dfc08a12SChao Yu 	for (i = 0; i < sits_in_cursum(journal); i++) {
2475351df4b2SJaegeuk Kim 		unsigned int segno;
2476184a5cd2SChao Yu 		bool dirtied;
2477184a5cd2SChao Yu 
2478dfc08a12SChao Yu 		segno = le32_to_cpu(segno_in_journal(journal, i));
2479184a5cd2SChao Yu 		dirtied = __mark_sit_entry_dirty(sbi, segno);
2480184a5cd2SChao Yu 
2481184a5cd2SChao Yu 		if (!dirtied)
2482184a5cd2SChao Yu 			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
2483351df4b2SJaegeuk Kim 	}
2484dfc08a12SChao Yu 	update_sits_in_cursum(journal, -i);
2485b7ad7512SChao Yu 	up_write(&curseg->journal_rwsem);
2486351df4b2SJaegeuk Kim }
2487351df4b2SJaegeuk Kim 
24880a8165d7SJaegeuk Kim /*
2489351df4b2SJaegeuk Kim  * CP calls this function, which flushes SIT entries including sit_journal,
2490351df4b2SJaegeuk Kim  * and moves prefree segs to free segs.
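 *
 * Dirty entries are grouped per SIT block into sit entry sets; each set is
 * written to the cold data journal while it still fits there, otherwise to
 * the on-disk SIT block obtained from get_next_sit_page().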
2491351df4b2SJaegeuk Kim  */
24924b2fecc8SJaegeuk Kim void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2493351df4b2SJaegeuk Kim {
2494351df4b2SJaegeuk Kim 	struct sit_info *sit_i = SIT_I(sbi);
2495351df4b2SJaegeuk Kim 	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
2496351df4b2SJaegeuk Kim 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2497b7ad7512SChao Yu 	struct f2fs_journal *journal = curseg->journal;
2498184a5cd2SChao Yu 	struct sit_entry_set *ses, *tmp;
2499184a5cd2SChao Yu 	struct list_head *head = &SM_I(sbi)->sit_entry_set;
2500184a5cd2SChao Yu 	bool to_journal = true;
25014b2fecc8SJaegeuk Kim 	struct seg_entry *se;
2502351df4b2SJaegeuk Kim 
2503351df4b2SJaegeuk Kim 	mutex_lock(&sit_i->sentry_lock);
2504351df4b2SJaegeuk Kim 
25052b11a74bSWanpeng Li 	if (!sit_i->dirty_sentries)
25062b11a74bSWanpeng Li 		goto out;
25072b11a74bSWanpeng Li 
2508351df4b2SJaegeuk Kim 	/*
2509184a5cd2SChao Yu 	 * collect and account the sit entries marked in the dirty bitmap
2510184a5cd2SChao Yu 	 * into sit entry sets temporarily
2511351df4b2SJaegeuk Kim 	 */
2512184a5cd2SChao Yu 	add_sits_in_set(sbi);
2513351df4b2SJaegeuk Kim 
2514184a5cd2SChao Yu 	/*
2515184a5cd2SChao Yu 	 * if there is not enough space in the journal to store all dirty sit
2516184a5cd2SChao Yu 	 * entries, remove every entry from the journal and account them in
2517184a5cd2SChao Yu 	 * sit entry sets instead.
2518184a5cd2SChao Yu 	 */
2519dfc08a12SChao Yu 	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
2520184a5cd2SChao Yu 		remove_sits_in_journal(sbi);
2521184a5cd2SChao Yu 
2522184a5cd2SChao Yu 	/*
2523184a5cd2SChao Yu 	 * there are two steps to flush sit entries:
2524184a5cd2SChao Yu 	 * #1, flush sit entries to journal in current cold data summary block.
2525184a5cd2SChao Yu 	 * #2, flush sit entries to sit page.
2526184a5cd2SChao Yu 	 */
2527184a5cd2SChao Yu 	list_for_each_entry_safe(ses, tmp, head, set_list) {
25284a257ed6SJaegeuk Kim 		struct page *page = NULL;
2529184a5cd2SChao Yu 		struct f2fs_sit_block *raw_sit = NULL;
2530184a5cd2SChao Yu 		unsigned int start_segno = ses->start_segno;
2531184a5cd2SChao Yu 		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
25327cd8558bSJaegeuk Kim 						(unsigned long)MAIN_SEGS(sbi));
2533184a5cd2SChao Yu 		unsigned int segno = start_segno;
2534184a5cd2SChao Yu 
2535184a5cd2SChao Yu 		if (to_journal &&
2536dfc08a12SChao Yu 			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
2537184a5cd2SChao Yu 			to_journal = false;
2538184a5cd2SChao Yu 
2539b7ad7512SChao Yu 		if (to_journal) {
2540b7ad7512SChao Yu 			down_write(&curseg->journal_rwsem);
2541b7ad7512SChao Yu 		} else {
2542184a5cd2SChao Yu 			page = get_next_sit_page(sbi, start_segno);
2543184a5cd2SChao Yu 			raw_sit = page_address(page);
2544184a5cd2SChao Yu 		}
2545184a5cd2SChao Yu 
2546184a5cd2SChao Yu 		/* flush dirty sit entries in region of current sit set */
2547184a5cd2SChao Yu 		for_each_set_bit_from(segno, bitmap, end) {
2548184a5cd2SChao Yu 			int offset, sit_offset;
25494b2fecc8SJaegeuk Kim 
25504b2fecc8SJaegeuk Kim 			se = get_seg_entry(sbi, segno);
2551351df4b2SJaegeuk Kim 
2552b2955550SJaegeuk Kim 			/* add discard candidates */
2553d7bc2484SJaegeuk Kim 			if (cpc->reason != CP_DISCARD) {
25544b2fecc8SJaegeuk Kim 				cpc->trim_start = segno;
255525290fa5SJaegeuk Kim 				add_discard_addrs(sbi, cpc, false);
25564b2fecc8SJaegeuk Kim 			}
2557b2955550SJaegeuk Kim 
2558184a5cd2SChao Yu 			if (to_journal) {
2559dfc08a12SChao Yu 				offset = lookup_journal_in_cursum(journal,
2560184a5cd2SChao Yu 							SIT_JOURNAL, segno, 1);
2561184a5cd2SChao Yu 				f2fs_bug_on(sbi, offset < 0);
2562dfc08a12SChao Yu 				segno_in_journal(journal, offset) =
2563184a5cd2SChao Yu 							cpu_to_le32(segno);
2564184a5cd2SChao Yu 				seg_info_to_raw_sit(se,
2565dfc08a12SChao Yu 					&sit_in_journal(journal, offset));
2566184a5cd2SChao Yu 			} else {
2567184a5cd2SChao Yu 				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
2568184a5cd2SChao Yu 				seg_info_to_raw_sit(se,
2569184a5cd2SChao Yu 						&raw_sit->entries[sit_offset]);
2570351df4b2SJaegeuk Kim 			}
2571351df4b2SJaegeuk Kim 
2572351df4b2SJaegeuk Kim 			__clear_bit(segno, bitmap);
2573351df4b2SJaegeuk Kim 			sit_i->dirty_sentries--;
2574184a5cd2SChao Yu 			ses->entry_cnt--;
2575351df4b2SJaegeuk Kim 		}
2576184a5cd2SChao Yu 
2577b7ad7512SChao Yu 		if (to_journal)
2578b7ad7512SChao Yu 			up_write(&curseg->journal_rwsem);
2579b7ad7512SChao Yu 		else
2580184a5cd2SChao Yu 			f2fs_put_page(page, 1);
2581184a5cd2SChao Yu 
2582184a5cd2SChao Yu 		f2fs_bug_on(sbi, ses->entry_cnt);
2583184a5cd2SChao Yu 		release_sit_entry_set(ses);
2584184a5cd2SChao Yu 	}
2585184a5cd2SChao Yu 
2586184a5cd2SChao Yu 	f2fs_bug_on(sbi, !list_empty(head));
2587184a5cd2SChao Yu 	f2fs_bug_on(sbi, sit_i->dirty_sentries);
2588184a5cd2SChao Yu out:
25894b2fecc8SJaegeuk Kim 	if (cpc->reason == CP_DISCARD) {
2590650d3c4eSYunlei He 		__u64 trim_start = cpc->trim_start;
2591650d3c4eSYunlei He 
25924b2fecc8SJaegeuk Kim 		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
259325290fa5SJaegeuk Kim 			add_discard_addrs(sbi, cpc, false);
2594650d3c4eSYunlei He 
2595650d3c4eSYunlei He 		cpc->trim_start = trim_start;
25964b2fecc8SJaegeuk Kim 	}
2597351df4b2SJaegeuk Kim 	mutex_unlock(&sit_i->sentry_lock);
2598351df4b2SJaegeuk Kim 
2599351df4b2SJaegeuk Kim 	set_prefree_as_free_segments(sbi);
2600351df4b2SJaegeuk Kim }
2601351df4b2SJaegeuk Kim 
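/*
 * Allocate the in-memory SIT: per-segment entries with their validity (and
 * optional discard) bitmaps, per-section entries when segs_per_sec > 1, and
 * a private copy of the SIT bitmap taken from the checkpoint pack.
 */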
2602351df4b2SJaegeuk Kim static int build_sit_info(struct f2fs_sb_info *sbi)
2603351df4b2SJaegeuk Kim {
2604351df4b2SJaegeuk Kim 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2605351df4b2SJaegeuk Kim 	struct sit_info *sit_i;
2606351df4b2SJaegeuk Kim 	unsigned int sit_segs, start;
2607ae27d62eSChao Yu 	char *src_bitmap;
2608351df4b2SJaegeuk Kim 	unsigned int bitmap_size;
2609351df4b2SJaegeuk Kim 
2610351df4b2SJaegeuk Kim 	/* allocate memory for SIT information */
2611351df4b2SJaegeuk Kim 	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
2612351df4b2SJaegeuk Kim 	if (!sit_i)
2613351df4b2SJaegeuk Kim 		return -ENOMEM;
2614351df4b2SJaegeuk Kim 
2615351df4b2SJaegeuk Kim 	SM_I(sbi)->sit_info = sit_i;
2616351df4b2SJaegeuk Kim 
261739307a8eSJaegeuk Kim 	sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) *
261839307a8eSJaegeuk Kim 					sizeof(struct seg_entry), GFP_KERNEL);
2619351df4b2SJaegeuk Kim 	if (!sit_i->sentries)
2620351df4b2SJaegeuk Kim 		return -ENOMEM;
2621351df4b2SJaegeuk Kim 
26227cd8558bSJaegeuk Kim 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
262339307a8eSJaegeuk Kim 	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
2624351df4b2SJaegeuk Kim 	if (!sit_i->dirty_sentries_bitmap)
2625351df4b2SJaegeuk Kim 		return -ENOMEM;
2626351df4b2SJaegeuk Kim 
26277cd8558bSJaegeuk Kim 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
2628351df4b2SJaegeuk Kim 		sit_i->sentries[start].cur_valid_map
2629351df4b2SJaegeuk Kim 			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2630351df4b2SJaegeuk Kim 		sit_i->sentries[start].ckpt_valid_map
2631351df4b2SJaegeuk Kim 			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
26323e025740SJaegeuk Kim 		if (!sit_i->sentries[start].cur_valid_map ||
26333e025740SJaegeuk Kim 				!sit_i->sentries[start].ckpt_valid_map)
26343e025740SJaegeuk Kim 			return -ENOMEM;
26353e025740SJaegeuk Kim 
2636355e7891SChao Yu #ifdef CONFIG_F2FS_CHECK_FS
2637355e7891SChao Yu 		sit_i->sentries[start].cur_valid_map_mir
2638355e7891SChao Yu 			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2639355e7891SChao Yu 		if (!sit_i->sentries[start].cur_valid_map_mir)
2640355e7891SChao Yu 			return -ENOMEM;
2641355e7891SChao Yu #endif
2642355e7891SChao Yu 
26433e025740SJaegeuk Kim 		if (f2fs_discard_en(sbi)) {
2644a66cdd98SJaegeuk Kim 			sit_i->sentries[start].discard_map
2645a66cdd98SJaegeuk Kim 				= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
26463e025740SJaegeuk Kim 			if (!sit_i->sentries[start].discard_map)
2647351df4b2SJaegeuk Kim 				return -ENOMEM;
2648351df4b2SJaegeuk Kim 		}
26493e025740SJaegeuk Kim 	}
2650351df4b2SJaegeuk Kim 
265160a3b782SJaegeuk Kim 	sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
265260a3b782SJaegeuk Kim 	if (!sit_i->tmp_map)
265360a3b782SJaegeuk Kim 		return -ENOMEM;
265460a3b782SJaegeuk Kim 
2655351df4b2SJaegeuk Kim 	if (sbi->segs_per_sec > 1) {
265639307a8eSJaegeuk Kim 		sit_i->sec_entries = f2fs_kvzalloc(MAIN_SECS(sbi) *
265739307a8eSJaegeuk Kim 					sizeof(struct sec_entry), GFP_KERNEL);
2658351df4b2SJaegeuk Kim 		if (!sit_i->sec_entries)
2659351df4b2SJaegeuk Kim 			return -ENOMEM;
2660351df4b2SJaegeuk Kim 	}
2661351df4b2SJaegeuk Kim 
2662351df4b2SJaegeuk Kim 	/* get information related to SIT */
2663351df4b2SJaegeuk Kim 	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
2664351df4b2SJaegeuk Kim 
2665351df4b2SJaegeuk Kim 	/* set up SIT bitmap from checkpoint pack */
2666351df4b2SJaegeuk Kim 	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
2667351df4b2SJaegeuk Kim 	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
2668351df4b2SJaegeuk Kim 
2669ae27d62eSChao Yu 	sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
2670ae27d62eSChao Yu 	if (!sit_i->sit_bitmap)
2671351df4b2SJaegeuk Kim 		return -ENOMEM;
2672351df4b2SJaegeuk Kim 
2673ae27d62eSChao Yu #ifdef CONFIG_F2FS_CHECK_FS
2674ae27d62eSChao Yu 	sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
2675ae27d62eSChao Yu 	if (!sit_i->sit_bitmap_mir)
2676ae27d62eSChao Yu 		return -ENOMEM;
2677ae27d62eSChao Yu #endif
2678ae27d62eSChao Yu 
2679351df4b2SJaegeuk Kim 	/* init SIT information */
2680351df4b2SJaegeuk Kim 	sit_i->s_ops = &default_salloc_ops;
2681351df4b2SJaegeuk Kim 
2682351df4b2SJaegeuk Kim 	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
2683351df4b2SJaegeuk Kim 	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
2684c79b7ff1SJaegeuk Kim 	sit_i->written_valid_blocks = 0;
2685351df4b2SJaegeuk Kim 	sit_i->bitmap_size = bitmap_size;
2686351df4b2SJaegeuk Kim 	sit_i->dirty_sentries = 0;
2687351df4b2SJaegeuk Kim 	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
2688351df4b2SJaegeuk Kim 	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
2689351df4b2SJaegeuk Kim 	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
2690351df4b2SJaegeuk Kim 	mutex_init(&sit_i->sentry_lock);
2691351df4b2SJaegeuk Kim 	return 0;
2692351df4b2SJaegeuk Kim }
2693351df4b2SJaegeuk Kim 
2694351df4b2SJaegeuk Kim static int build_free_segmap(struct f2fs_sb_info *sbi)
2695351df4b2SJaegeuk Kim {
2696351df4b2SJaegeuk Kim 	struct free_segmap_info *free_i;
2697351df4b2SJaegeuk Kim 	unsigned int bitmap_size, sec_bitmap_size;
2698351df4b2SJaegeuk Kim 
2699351df4b2SJaegeuk Kim 	/* allocate memory for free segmap information */
2700351df4b2SJaegeuk Kim 	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
2701351df4b2SJaegeuk Kim 	if (!free_i)
2702351df4b2SJaegeuk Kim 		return -ENOMEM;
2703351df4b2SJaegeuk Kim 
2704351df4b2SJaegeuk Kim 	SM_I(sbi)->free_info = free_i;
2705351df4b2SJaegeuk Kim 
27067cd8558bSJaegeuk Kim 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
270739307a8eSJaegeuk Kim 	free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL);
2708351df4b2SJaegeuk Kim 	if (!free_i->free_segmap)
2709351df4b2SJaegeuk Kim 		return -ENOMEM;
2710351df4b2SJaegeuk Kim 
27117cd8558bSJaegeuk Kim 	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
271239307a8eSJaegeuk Kim 	free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL);
2713351df4b2SJaegeuk Kim 	if (!free_i->free_secmap)
2714351df4b2SJaegeuk Kim 		return -ENOMEM;
2715351df4b2SJaegeuk Kim 
2716351df4b2SJaegeuk Kim 	/* set all segments as dirty temporarily */
2717351df4b2SJaegeuk Kim 	memset(free_i->free_segmap, 0xff, bitmap_size);
2718351df4b2SJaegeuk Kim 	memset(free_i->free_secmap, 0xff, sec_bitmap_size);
2719351df4b2SJaegeuk Kim 
2720351df4b2SJaegeuk Kim 	/* init free segmap information */
27217cd8558bSJaegeuk Kim 	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
2722351df4b2SJaegeuk Kim 	free_i->free_segments = 0;
2723351df4b2SJaegeuk Kim 	free_i->free_sections = 0;
27241a118ccfSChao Yu 	spin_lock_init(&free_i->segmap_lock);
2725351df4b2SJaegeuk Kim 	return 0;
2726351df4b2SJaegeuk Kim }
2727351df4b2SJaegeuk Kim 
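/*
 * Allocate the array of current segments, each with a summary block and an
 * in-memory journal, then restore their contents from the last checkpoint.
 */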
2728351df4b2SJaegeuk Kim static int build_curseg(struct f2fs_sb_info *sbi)
2729351df4b2SJaegeuk Kim {
27301042d60fSNamjae Jeon 	struct curseg_info *array;
2731351df4b2SJaegeuk Kim 	int i;
2732351df4b2SJaegeuk Kim 
2733b434babfSFabian Frederick 	array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
2734351df4b2SJaegeuk Kim 	if (!array)
2735351df4b2SJaegeuk Kim 		return -ENOMEM;
2736351df4b2SJaegeuk Kim 
2737351df4b2SJaegeuk Kim 	SM_I(sbi)->curseg_array = array;
2738351df4b2SJaegeuk Kim 
2739351df4b2SJaegeuk Kim 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
2740351df4b2SJaegeuk Kim 		mutex_init(&array[i].curseg_mutex);
274109cbfeafSKirill A. Shutemov 		array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
2742351df4b2SJaegeuk Kim 		if (!array[i].sum_blk)
2743351df4b2SJaegeuk Kim 			return -ENOMEM;
2744b7ad7512SChao Yu 		init_rwsem(&array[i].journal_rwsem);
2745b7ad7512SChao Yu 		array[i].journal = kzalloc(sizeof(struct f2fs_journal),
2746b7ad7512SChao Yu 							GFP_KERNEL);
2747b7ad7512SChao Yu 		if (!array[i].journal)
2748b7ad7512SChao Yu 			return -ENOMEM;
2749351df4b2SJaegeuk Kim 		array[i].segno = NULL_SEGNO;
2750351df4b2SJaegeuk Kim 		array[i].next_blkoff = 0;
2751351df4b2SJaegeuk Kim 	}
2752351df4b2SJaegeuk Kim 	return restore_curseg_summaries(sbi);
2753351df4b2SJaegeuk Kim }
2754351df4b2SJaegeuk Kim 
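/*
 * Load the on-disk SIT blocks (with readahead) into the in-memory seg
 * entries, then overlay the newer entries recorded in the cold data SIT
 * journal; discard maps and section valid-block counts are seeded here too.
 */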
2755351df4b2SJaegeuk Kim static void build_sit_entries(struct f2fs_sb_info *sbi)
2756351df4b2SJaegeuk Kim {
2757351df4b2SJaegeuk Kim 	struct sit_info *sit_i = SIT_I(sbi);
2758351df4b2SJaegeuk Kim 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2759b7ad7512SChao Yu 	struct f2fs_journal *journal = curseg->journal;
27609c094040SYunlei He 	struct seg_entry *se;
27619c094040SYunlei He 	struct f2fs_sit_entry sit;
276274de593aSChao Yu 	int sit_blk_cnt = SIT_BLK_CNT(sbi);
276374de593aSChao Yu 	unsigned int i, start, end;
276474de593aSChao Yu 	unsigned int readed, start_blk = 0;
2765351df4b2SJaegeuk Kim 
276674de593aSChao Yu 	do {
2767664ba972SJaegeuk Kim 		readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
2768664ba972SJaegeuk Kim 							META_SIT, true);
276974de593aSChao Yu 
277074de593aSChao Yu 		start = start_blk * sit_i->sents_per_block;
277174de593aSChao Yu 		end = (start_blk + readed) * sit_i->sents_per_block;
277274de593aSChao Yu 
27737cd8558bSJaegeuk Kim 		for (; start < end && start < MAIN_SEGS(sbi); start++) {
2774351df4b2SJaegeuk Kim 			struct f2fs_sit_block *sit_blk;
2775351df4b2SJaegeuk Kim 			struct page *page;
2776351df4b2SJaegeuk Kim 
27779c094040SYunlei He 			se = &sit_i->sentries[start];
2778351df4b2SJaegeuk Kim 			page = get_current_sit_page(sbi, start);
2779351df4b2SJaegeuk Kim 			sit_blk = (struct f2fs_sit_block *)page_address(page);
2780351df4b2SJaegeuk Kim 			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
2781351df4b2SJaegeuk Kim 			f2fs_put_page(page, 1);
2782d600af23SChao Yu 
2783351df4b2SJaegeuk Kim 			check_block_count(sbi, start, &sit);
2784351df4b2SJaegeuk Kim 			seg_info_from_raw_sit(se, &sit);
2785a66cdd98SJaegeuk Kim 
2786a66cdd98SJaegeuk Kim 			/* build discard map only once */
27873e025740SJaegeuk Kim 			if (f2fs_discard_en(sbi)) {
27883e025740SJaegeuk Kim 				memcpy(se->discard_map, se->cur_valid_map,
27893e025740SJaegeuk Kim 							SIT_VBLOCK_MAP_SIZE);
27903e025740SJaegeuk Kim 				sbi->discard_blks += sbi->blocks_per_seg -
27913e025740SJaegeuk Kim 							se->valid_blocks;
27923e025740SJaegeuk Kim 			}
2793a66cdd98SJaegeuk Kim 
2794d600af23SChao Yu 			if (sbi->segs_per_sec > 1)
2795d600af23SChao Yu 				get_sec_entry(sbi, start)->valid_blocks +=
2796d600af23SChao Yu 							se->valid_blocks;
2797351df4b2SJaegeuk Kim 		}
279874de593aSChao Yu 		start_blk += readed;
279974de593aSChao Yu 	} while (start_blk < sit_blk_cnt);
2800d600af23SChao Yu 
2801d600af23SChao Yu 	down_read(&curseg->journal_rwsem);
2802d600af23SChao Yu 	for (i = 0; i < sits_in_cursum(journal); i++) {
2803d600af23SChao Yu 		unsigned int old_valid_blocks;
2804d600af23SChao Yu 
2805d600af23SChao Yu 		start = le32_to_cpu(segno_in_journal(journal, i));
2806d600af23SChao Yu 		se = &sit_i->sentries[start];
2807d600af23SChao Yu 		sit = sit_in_journal(journal, i);
2808d600af23SChao Yu 
2809d600af23SChao Yu 		old_valid_blocks = se->valid_blocks;
2810d600af23SChao Yu 
2811d600af23SChao Yu 		check_block_count(sbi, start, &sit);
2812d600af23SChao Yu 		seg_info_from_raw_sit(se, &sit);
2813d600af23SChao Yu 
2814d600af23SChao Yu 		if (f2fs_discard_en(sbi)) {
2815d600af23SChao Yu 			memcpy(se->discard_map, se->cur_valid_map,
2816d600af23SChao Yu 						SIT_VBLOCK_MAP_SIZE);
2817d600af23SChao Yu 			sbi->discard_blks += old_valid_blocks -
2818d600af23SChao Yu 						se->valid_blocks;
2819d600af23SChao Yu 		}
2820d600af23SChao Yu 
2821d600af23SChao Yu 		if (sbi->segs_per_sec > 1)
2822d600af23SChao Yu 			get_sec_entry(sbi, start)->valid_blocks +=
2823d600af23SChao Yu 				se->valid_blocks - old_valid_blocks;
2824d600af23SChao Yu 	}
2825d600af23SChao Yu 	up_read(&curseg->journal_rwsem);
2826351df4b2SJaegeuk Kim }
2827351df4b2SJaegeuk Kim 
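/*
 * Mark segments without valid blocks as free, accumulate the written valid
 * block count, and flag the current segments as in use.
 */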
2828351df4b2SJaegeuk Kim static void init_free_segmap(struct f2fs_sb_info *sbi)
2829351df4b2SJaegeuk Kim {
2830351df4b2SJaegeuk Kim 	unsigned int start;
2831351df4b2SJaegeuk Kim 	int type;
2832351df4b2SJaegeuk Kim 
28337cd8558bSJaegeuk Kim 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
2834351df4b2SJaegeuk Kim 		struct seg_entry *sentry = get_seg_entry(sbi, start);
2835351df4b2SJaegeuk Kim 		if (!sentry->valid_blocks)
2836351df4b2SJaegeuk Kim 			__set_free(sbi, start);
2837c79b7ff1SJaegeuk Kim 		else
2838c79b7ff1SJaegeuk Kim 			SIT_I(sbi)->written_valid_blocks +=
2839c79b7ff1SJaegeuk Kim 						sentry->valid_blocks;
2840351df4b2SJaegeuk Kim 	}
2841351df4b2SJaegeuk Kim 
2842351df4b2SJaegeuk Kim 	/* mark the current segments as in use */
2843351df4b2SJaegeuk Kim 	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
2844351df4b2SJaegeuk Kim 		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
2845351df4b2SJaegeuk Kim 		__set_test_and_inuse(sbi, curseg_t->segno);
2846351df4b2SJaegeuk Kim 	}
2847351df4b2SJaegeuk Kim }
2848351df4b2SJaegeuk Kim 
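/*
 * Scan the in-use segments and mark the partially valid ones dirty; full
 * and empty segments are skipped.
 */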
2849351df4b2SJaegeuk Kim static void init_dirty_segmap(struct f2fs_sb_info *sbi)
2850351df4b2SJaegeuk Kim {
2851351df4b2SJaegeuk Kim 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2852351df4b2SJaegeuk Kim 	struct free_segmap_info *free_i = FREE_I(sbi);
28537cd8558bSJaegeuk Kim 	unsigned int segno = 0, offset = 0;
2854351df4b2SJaegeuk Kim 	unsigned short valid_blocks;
2855351df4b2SJaegeuk Kim 
28568736fbf0SNamjae Jeon 	while (1) {
2857351df4b2SJaegeuk Kim 		/* find dirty segment based on free segmap */
28587cd8558bSJaegeuk Kim 		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
28597cd8558bSJaegeuk Kim 		if (segno >= MAIN_SEGS(sbi))
2860351df4b2SJaegeuk Kim 			break;
2861351df4b2SJaegeuk Kim 		offset = segno + 1;
2862302bd348SJaegeuk Kim 		valid_blocks = get_valid_blocks(sbi, segno, false);
2863ec325b52SJaegeuk Kim 		if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
2864351df4b2SJaegeuk Kim 			continue;
2865ec325b52SJaegeuk Kim 		if (valid_blocks > sbi->blocks_per_seg) {
2866ec325b52SJaegeuk Kim 			f2fs_bug_on(sbi, 1);
2867ec325b52SJaegeuk Kim 			continue;
2868ec325b52SJaegeuk Kim 		}
2869351df4b2SJaegeuk Kim 		mutex_lock(&dirty_i->seglist_lock);
2870351df4b2SJaegeuk Kim 		__locate_dirty_segment(sbi, segno, DIRTY);
2871351df4b2SJaegeuk Kim 		mutex_unlock(&dirty_i->seglist_lock);
2872351df4b2SJaegeuk Kim 	}
2873351df4b2SJaegeuk Kim }
2874351df4b2SJaegeuk Kim 
28755ec4e49fSJaegeuk Kim static int init_victim_secmap(struct f2fs_sb_info *sbi)
2876351df4b2SJaegeuk Kim {
2877351df4b2SJaegeuk Kim 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
28787cd8558bSJaegeuk Kim 	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
2879351df4b2SJaegeuk Kim 
288039307a8eSJaegeuk Kim 	dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
28815ec4e49fSJaegeuk Kim 	if (!dirty_i->victim_secmap)
2882351df4b2SJaegeuk Kim 		return -ENOMEM;
2883351df4b2SJaegeuk Kim 	return 0;
2884351df4b2SJaegeuk Kim }
2885351df4b2SJaegeuk Kim 
2886351df4b2SJaegeuk Kim static int build_dirty_segmap(struct f2fs_sb_info *sbi)
2887351df4b2SJaegeuk Kim {
2888351df4b2SJaegeuk Kim 	struct dirty_seglist_info *dirty_i;
2889351df4b2SJaegeuk Kim 	unsigned int bitmap_size, i;
2890351df4b2SJaegeuk Kim 
2891351df4b2SJaegeuk Kim 	/* allocate memory for dirty segments list information */
2892351df4b2SJaegeuk Kim 	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
2893351df4b2SJaegeuk Kim 	if (!dirty_i)
2894351df4b2SJaegeuk Kim 		return -ENOMEM;
2895351df4b2SJaegeuk Kim 
2896351df4b2SJaegeuk Kim 	SM_I(sbi)->dirty_info = dirty_i;
2897351df4b2SJaegeuk Kim 	mutex_init(&dirty_i->seglist_lock);
2898351df4b2SJaegeuk Kim 
28997cd8558bSJaegeuk Kim 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
2900351df4b2SJaegeuk Kim 
2901351df4b2SJaegeuk Kim 	for (i = 0; i < NR_DIRTY_TYPE; i++) {
290239307a8eSJaegeuk Kim 		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
2903351df4b2SJaegeuk Kim 		if (!dirty_i->dirty_segmap[i])
2904351df4b2SJaegeuk Kim 			return -ENOMEM;
2905351df4b2SJaegeuk Kim 	}
2906351df4b2SJaegeuk Kim 
2907351df4b2SJaegeuk Kim 	init_dirty_segmap(sbi);
29085ec4e49fSJaegeuk Kim 	return init_victim_secmap(sbi);
2909351df4b2SJaegeuk Kim }
2910351df4b2SJaegeuk Kim 
29110a8165d7SJaegeuk Kim /*
2912351df4b2SJaegeuk Kim  * Update min, max modified time for cost-benefit GC algorithm
2913351df4b2SJaegeuk Kim  */
2914351df4b2SJaegeuk Kim static void init_min_max_mtime(struct f2fs_sb_info *sbi)
2915351df4b2SJaegeuk Kim {
2916351df4b2SJaegeuk Kim 	struct sit_info *sit_i = SIT_I(sbi);
2917351df4b2SJaegeuk Kim 	unsigned int segno;
2918351df4b2SJaegeuk Kim 
2919351df4b2SJaegeuk Kim 	mutex_lock(&sit_i->sentry_lock);
2920351df4b2SJaegeuk Kim 
2921351df4b2SJaegeuk Kim 	sit_i->min_mtime = LLONG_MAX;
2922351df4b2SJaegeuk Kim 
29237cd8558bSJaegeuk Kim 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
2924351df4b2SJaegeuk Kim 		unsigned int i;
2925351df4b2SJaegeuk Kim 		unsigned long long mtime = 0;
2926351df4b2SJaegeuk Kim 
2927351df4b2SJaegeuk Kim 		for (i = 0; i < sbi->segs_per_sec; i++)
2928351df4b2SJaegeuk Kim 			mtime += get_seg_entry(sbi, segno + i)->mtime;
2929351df4b2SJaegeuk Kim 
2930351df4b2SJaegeuk Kim 		mtime = div_u64(mtime, sbi->segs_per_sec);
2931351df4b2SJaegeuk Kim 
2932351df4b2SJaegeuk Kim 		if (sit_i->min_mtime > mtime)
2933351df4b2SJaegeuk Kim 			sit_i->min_mtime = mtime;
2934351df4b2SJaegeuk Kim 	}
2935351df4b2SJaegeuk Kim 	sit_i->max_mtime = get_mtime(sbi);
2936351df4b2SJaegeuk Kim 	mutex_unlock(&sit_i->sentry_lock);
2937351df4b2SJaegeuk Kim }
2938351df4b2SJaegeuk Kim 
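/*
 * Entry point for segment manager setup: fill sm_info from the superblock
 * and checkpoint, create the discard (and, when enabled, flush) command
 * controls, then build the SIT, free segmap, cursegs and dirty segmap
 * before computing the min/max mtime bounds used by GC.
 */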
2939351df4b2SJaegeuk Kim int build_segment_manager(struct f2fs_sb_info *sbi)
2940351df4b2SJaegeuk Kim {
2941351df4b2SJaegeuk Kim 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2942351df4b2SJaegeuk Kim 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
29431042d60fSNamjae Jeon 	struct f2fs_sm_info *sm_info;
2944351df4b2SJaegeuk Kim 	int err;
2945351df4b2SJaegeuk Kim 
2946351df4b2SJaegeuk Kim 	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
2947351df4b2SJaegeuk Kim 	if (!sm_info)
2948351df4b2SJaegeuk Kim 		return -ENOMEM;
2949351df4b2SJaegeuk Kim 
2950351df4b2SJaegeuk Kim 	/* init sm info */
2951351df4b2SJaegeuk Kim 	sbi->sm_info = sm_info;
2952351df4b2SJaegeuk Kim 	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
2953351df4b2SJaegeuk Kim 	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
2954351df4b2SJaegeuk Kim 	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
2955351df4b2SJaegeuk Kim 	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
2956351df4b2SJaegeuk Kim 	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
2957351df4b2SJaegeuk Kim 	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
2958351df4b2SJaegeuk Kim 	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
295958c41035SJaegeuk Kim 	sm_info->rec_prefree_segments = sm_info->main_segments *
296058c41035SJaegeuk Kim 					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
296144a83499SJaegeuk Kim 	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
296244a83499SJaegeuk Kim 		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
296344a83499SJaegeuk Kim 
296452763a4bSJaegeuk Kim 	if (!test_opt(sbi, LFS))
29659b5f136fSJaegeuk Kim 		sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
2966216fbd64SJaegeuk Kim 	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
2967c1ce1b02SJaegeuk Kim 	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
2968ef095d19SJaegeuk Kim 	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
2969351df4b2SJaegeuk Kim 
2970bba681cbSJaegeuk Kim 	sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
2971bba681cbSJaegeuk Kim 
2972184a5cd2SChao Yu 	INIT_LIST_HEAD(&sm_info->sit_entry_set);
2973184a5cd2SChao Yu 
2974b270ad6fSGu Zheng 	if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
29752163d198SGu Zheng 		err = create_flush_cmd_control(sbi);
29762163d198SGu Zheng 		if (err)
2977a688b9d9SGu Zheng 			return err;
2978a688b9d9SGu Zheng 	}
29796b4afdd7SJaegeuk Kim 
29800b54fb84SJaegeuk Kim 	err = create_discard_cmd_control(sbi);
29810b54fb84SJaegeuk Kim 	if (err)
29820b54fb84SJaegeuk Kim 		return err;
29830b54fb84SJaegeuk Kim 
2984351df4b2SJaegeuk Kim 	err = build_sit_info(sbi);
2985351df4b2SJaegeuk Kim 	if (err)
2986351df4b2SJaegeuk Kim 		return err;
2987351df4b2SJaegeuk Kim 	err = build_free_segmap(sbi);
2988351df4b2SJaegeuk Kim 	if (err)
2989351df4b2SJaegeuk Kim 		return err;
2990351df4b2SJaegeuk Kim 	err = build_curseg(sbi);
2991351df4b2SJaegeuk Kim 	if (err)
2992351df4b2SJaegeuk Kim 		return err;
2993351df4b2SJaegeuk Kim 
2994351df4b2SJaegeuk Kim 	/* reinit free segmap based on SIT */
2995351df4b2SJaegeuk Kim 	build_sit_entries(sbi);
2996351df4b2SJaegeuk Kim 
2997351df4b2SJaegeuk Kim 	init_free_segmap(sbi);
2998351df4b2SJaegeuk Kim 	err = build_dirty_segmap(sbi);
2999351df4b2SJaegeuk Kim 	if (err)
3000351df4b2SJaegeuk Kim 		return err;
3001351df4b2SJaegeuk Kim 
3002351df4b2SJaegeuk Kim 	init_min_max_mtime(sbi);
3003351df4b2SJaegeuk Kim 	return 0;
3004351df4b2SJaegeuk Kim }
3005351df4b2SJaegeuk Kim 
3006351df4b2SJaegeuk Kim static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
3007351df4b2SJaegeuk Kim 		enum dirty_type dirty_type)
3008351df4b2SJaegeuk Kim {
3009351df4b2SJaegeuk Kim 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3010351df4b2SJaegeuk Kim 
3011351df4b2SJaegeuk Kim 	mutex_lock(&dirty_i->seglist_lock);
301239307a8eSJaegeuk Kim 	kvfree(dirty_i->dirty_segmap[dirty_type]);
3013351df4b2SJaegeuk Kim 	dirty_i->nr_dirty[dirty_type] = 0;
3014351df4b2SJaegeuk Kim 	mutex_unlock(&dirty_i->seglist_lock);
3015351df4b2SJaegeuk Kim }
3016351df4b2SJaegeuk Kim 
30175ec4e49fSJaegeuk Kim static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
3018351df4b2SJaegeuk Kim {
3019351df4b2SJaegeuk Kim 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
302039307a8eSJaegeuk Kim 	kvfree(dirty_i->victim_secmap);
3021351df4b2SJaegeuk Kim }
3022351df4b2SJaegeuk Kim 
3023351df4b2SJaegeuk Kim static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
3024351df4b2SJaegeuk Kim {
3025351df4b2SJaegeuk Kim 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3026351df4b2SJaegeuk Kim 	int i;
3027351df4b2SJaegeuk Kim 
3028351df4b2SJaegeuk Kim 	if (!dirty_i)
3029351df4b2SJaegeuk Kim 		return;
3030351df4b2SJaegeuk Kim 
3031351df4b2SJaegeuk Kim 	/* discard pre-free/dirty segments list */
3032351df4b2SJaegeuk Kim 	for (i = 0; i < NR_DIRTY_TYPE; i++)
3033351df4b2SJaegeuk Kim 		discard_dirty_segmap(sbi, i);
3034351df4b2SJaegeuk Kim 
30355ec4e49fSJaegeuk Kim 	destroy_victim_secmap(sbi);
3036351df4b2SJaegeuk Kim 	SM_I(sbi)->dirty_info = NULL;
3037351df4b2SJaegeuk Kim 	kfree(dirty_i);
3038351df4b2SJaegeuk Kim }
3039351df4b2SJaegeuk Kim 
3040351df4b2SJaegeuk Kim static void destroy_curseg(struct f2fs_sb_info *sbi)
3041351df4b2SJaegeuk Kim {
3042351df4b2SJaegeuk Kim 	struct curseg_info *array = SM_I(sbi)->curseg_array;
3043351df4b2SJaegeuk Kim 	int i;
3044351df4b2SJaegeuk Kim 
3045351df4b2SJaegeuk Kim 	if (!array)
3046351df4b2SJaegeuk Kim 		return;
3047351df4b2SJaegeuk Kim 	SM_I(sbi)->curseg_array = NULL;
3048b7ad7512SChao Yu 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
3049351df4b2SJaegeuk Kim 		kfree(array[i].sum_blk);
3050b7ad7512SChao Yu 		kfree(array[i].journal);
3051b7ad7512SChao Yu 	}
3052351df4b2SJaegeuk Kim 	kfree(array);
3053351df4b2SJaegeuk Kim }
3054351df4b2SJaegeuk Kim 
3055351df4b2SJaegeuk Kim static void destroy_free_segmap(struct f2fs_sb_info *sbi)
3056351df4b2SJaegeuk Kim {
3057351df4b2SJaegeuk Kim 	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
3058351df4b2SJaegeuk Kim 	if (!free_i)
3059351df4b2SJaegeuk Kim 		return;
3060351df4b2SJaegeuk Kim 	SM_I(sbi)->free_info = NULL;
306139307a8eSJaegeuk Kim 	kvfree(free_i->free_segmap);
306239307a8eSJaegeuk Kim 	kvfree(free_i->free_secmap);
3063351df4b2SJaegeuk Kim 	kfree(free_i);
3064351df4b2SJaegeuk Kim }
3065351df4b2SJaegeuk Kim 
3066351df4b2SJaegeuk Kim static void destroy_sit_info(struct f2fs_sb_info *sbi)
3067351df4b2SJaegeuk Kim {
3068351df4b2SJaegeuk Kim 	struct sit_info *sit_i = SIT_I(sbi);
3069351df4b2SJaegeuk Kim 	unsigned int start;
3070351df4b2SJaegeuk Kim 
3071351df4b2SJaegeuk Kim 	if (!sit_i)
3072351df4b2SJaegeuk Kim 		return;
3073351df4b2SJaegeuk Kim 
3074351df4b2SJaegeuk Kim 	if (sit_i->sentries) {
30757cd8558bSJaegeuk Kim 		for (start = 0; start < MAIN_SEGS(sbi); start++) {
3076351df4b2SJaegeuk Kim 			kfree(sit_i->sentries[start].cur_valid_map);
3077355e7891SChao Yu #ifdef CONFIG_F2FS_CHECK_FS
3078355e7891SChao Yu 			kfree(sit_i->sentries[start].cur_valid_map_mir);
3079355e7891SChao Yu #endif
3080351df4b2SJaegeuk Kim 			kfree(sit_i->sentries[start].ckpt_valid_map);
3081a66cdd98SJaegeuk Kim 			kfree(sit_i->sentries[start].discard_map);
3082351df4b2SJaegeuk Kim 		}
3083351df4b2SJaegeuk Kim 	}
308460a3b782SJaegeuk Kim 	kfree(sit_i->tmp_map);
308560a3b782SJaegeuk Kim 
308639307a8eSJaegeuk Kim 	kvfree(sit_i->sentries);
308739307a8eSJaegeuk Kim 	kvfree(sit_i->sec_entries);
308839307a8eSJaegeuk Kim 	kvfree(sit_i->dirty_sentries_bitmap);
3089351df4b2SJaegeuk Kim 
3090351df4b2SJaegeuk Kim 	SM_I(sbi)->sit_info = NULL;
3091351df4b2SJaegeuk Kim 	kfree(sit_i->sit_bitmap);
3092ae27d62eSChao Yu #ifdef CONFIG_F2FS_CHECK_FS
3093ae27d62eSChao Yu 	kfree(sit_i->sit_bitmap_mir);
3094ae27d62eSChao Yu #endif
3095351df4b2SJaegeuk Kim 	kfree(sit_i);
3096351df4b2SJaegeuk Kim }
3097351df4b2SJaegeuk Kim 
3098351df4b2SJaegeuk Kim void destroy_segment_manager(struct f2fs_sb_info *sbi)
3099351df4b2SJaegeuk Kim {
3100351df4b2SJaegeuk Kim 	struct f2fs_sm_info *sm_info = SM_I(sbi);
3101a688b9d9SGu Zheng 
31023b03f724SChao Yu 	if (!sm_info)
31033b03f724SChao Yu 		return;
31045eba8c5dSJaegeuk Kim 	destroy_flush_cmd_control(sbi, true);
3105f099405fSChao Yu 	destroy_discard_cmd_control(sbi);
3106351df4b2SJaegeuk Kim 	destroy_dirty_segmap(sbi);
3107351df4b2SJaegeuk Kim 	destroy_curseg(sbi);
3108351df4b2SJaegeuk Kim 	destroy_free_segmap(sbi);
3109351df4b2SJaegeuk Kim 	destroy_sit_info(sbi);
3110351df4b2SJaegeuk Kim 	sbi->sm_info = NULL;
3111351df4b2SJaegeuk Kim 	kfree(sm_info);
3112351df4b2SJaegeuk Kim }
31137fd9e544SJaegeuk Kim 
31147fd9e544SJaegeuk Kim int __init create_segment_manager_caches(void)
31157fd9e544SJaegeuk Kim {
31167fd9e544SJaegeuk Kim 	discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
3117e8512d2eSGu Zheng 			sizeof(struct discard_entry));
31187fd9e544SJaegeuk Kim 	if (!discard_entry_slab)
3119184a5cd2SChao Yu 		goto fail;
3120184a5cd2SChao Yu 
3121b01a9201SJaegeuk Kim 	discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd",
3122b01a9201SJaegeuk Kim 			sizeof(struct discard_cmd));
3123b01a9201SJaegeuk Kim 	if (!discard_cmd_slab)
31246ab2a308SChao Yu 		goto destroy_discard_entry;
3125275b66b0SChao Yu 
3126184a5cd2SChao Yu 	sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
3127c9ee0085SChangman Lee 			sizeof(struct sit_entry_set));
3128184a5cd2SChao Yu 	if (!sit_entry_set_slab)
3129b01a9201SJaegeuk Kim 		goto destroy_discard_cmd;
313088b88a66SJaegeuk Kim 
313188b88a66SJaegeuk Kim 	inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
313288b88a66SJaegeuk Kim 			sizeof(struct inmem_pages));
313388b88a66SJaegeuk Kim 	if (!inmem_entry_slab)
313488b88a66SJaegeuk Kim 		goto destroy_sit_entry_set;
31357fd9e544SJaegeuk Kim 	return 0;
3136184a5cd2SChao Yu 
313788b88a66SJaegeuk Kim destroy_sit_entry_set:
313888b88a66SJaegeuk Kim 	kmem_cache_destroy(sit_entry_set_slab);
3139b01a9201SJaegeuk Kim destroy_discard_cmd:
3140b01a9201SJaegeuk Kim 	kmem_cache_destroy(discard_cmd_slab);
31416ab2a308SChao Yu destroy_discard_entry:
3142184a5cd2SChao Yu 	kmem_cache_destroy(discard_entry_slab);
3143184a5cd2SChao Yu fail:
3144184a5cd2SChao Yu 	return -ENOMEM;
31457fd9e544SJaegeuk Kim }
31467fd9e544SJaegeuk Kim 
31477fd9e544SJaegeuk Kim void destroy_segment_manager_caches(void)
31487fd9e544SJaegeuk Kim {
3149184a5cd2SChao Yu 	kmem_cache_destroy(sit_entry_set_slab);
3150b01a9201SJaegeuk Kim 	kmem_cache_destroy(discard_cmd_slab);
31517fd9e544SJaegeuk Kim 	kmem_cache_destroy(discard_entry_slab);
315288b88a66SJaegeuk Kim 	kmem_cache_destroy(inmem_entry_slab);
31537fd9e544SJaegeuk Kim }
3154