// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *revoke_entry_slab;

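/*
 * Build an unsigned long from @str one byte at a time, placing the first
 * byte in the most significant position, i.e. a big-endian read of
 * sizeof(unsigned long) bytes regardless of host endianness.
 */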
static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be an integral multiple of BITS_PER_LONG.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		tmp &= ~0UL >> offset;
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}

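/*
 * Decide whether allocation should fall back to SSR (slack space recycling):
 * never in LFS mode, always under urgent GC or while checkpointing is
 * disabled, otherwise only when free sections run low.
 */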
bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (f2fs_lfs_mode(sbi))
		return false;
	if (sbi->gc_mode == GC_URGENT_HIGH)
		return true;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return true;

	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
}

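/*
 * Tear down an in-flight atomic write: clear the atomic-write flags and
 * counters, and when @clean also drop the cached pages and restore the
 * original i_size, leaving no stale dirty inode behind for eviction.
 */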
void f2fs_abort_atomic_write(struct inode *inode, bool clean)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (!f2fs_is_atomic_file(inode))
		return;

	if (clean)
		truncate_inode_pages_final(inode->i_mapping);

	release_atomic_write_cnt(inode);
	clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
	clear_inode_flag(inode, FI_ATOMIC_REPLACE);
	clear_inode_flag(inode, FI_ATOMIC_FILE);
	if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
		clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
		f2fs_mark_inode_dirty_sync(inode, true);
	}
	stat_dec_atomic_inode(inode);

	F2FS_I(inode)->atomic_write_task = NULL;

	if (clean) {
		f2fs_i_size_write(inode, fi->original_i_size);
		fi->original_i_size = 0;
	}
	/* avoid stale dirty inode during eviction */
	sync_inode_metadata(inode, 0);
}

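/*
 * Swap the block address at @index of @inode with @new_addr.  In commit mode
 * the displaced address is returned through @old_addr; in recovery mode
 * (@recover) the previously saved address is written back instead.
 */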
static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
			block_t new_addr, block_t *old_addr, bool recover)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct node_info ni;
	int err;

retry:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
			goto retry;
		}
		return err;
	}

	err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	if (recover) {
		/* dn.data_blkaddr is always valid */
		if (!__is_valid_data_blkaddr(new_addr)) {
			if (new_addr == NULL_ADDR)
				dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
			f2fs_update_data_blkaddr(&dn, new_addr);
		} else {
			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
				new_addr, ni.version, true, true);
		}
	} else {
		blkcnt_t count = 1;

		err = inc_valid_block_count(sbi, inode, &count, true);
		if (err) {
			f2fs_put_dnode(&dn);
			return err;
		}

		*old_addr = dn.data_blkaddr;
		f2fs_truncate_data_blocks_range(&dn, 1);
		dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);

		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
					ni.version, true, false);
	}

	f2fs_put_dnode(&dn);

	trace_f2fs_replace_atomic_write_block(inode, F2FS_I(inode)->cow_inode,
			index, old_addr ? *old_addr : 0, new_addr, recover);
	return 0;
}

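/*
 * Walk the revoke list built during commit: on failure (@revoke) restore the
 * saved block addresses; otherwise, for an FI_ATOMIC_REPLACE inode, truncate
 * every range that was not overwritten by the atomic write.  All entries are
 * freed either way.
 */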
static void __complete_revoke_list(struct inode *inode, struct list_head *head,
					bool revoke)
{
	struct revoke_entry *cur, *tmp;
	pgoff_t start_index = 0;
	bool truncate = is_inode_flag_set(inode, FI_ATOMIC_REPLACE);

	list_for_each_entry_safe(cur, tmp, head, list) {
		if (revoke) {
			__replace_atomic_write_block(inode, cur->index,
					cur->old_addr, NULL, true);
		} else if (truncate) {
			f2fs_truncate_hole(inode, start_index, cur->index);
			start_index = cur->index + 1;
		}

		list_del(&cur->list);
		kmem_cache_free(revoke_entry_slab, cur);
	}

	if (!revoke && truncate)
		f2fs_do_truncate_blocks(inode, start_index * PAGE_SIZE, false);
}

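/*
 * Move every block written to the COW inode back into the original inode,
 * recording each displaced address in a revoke list so the whole commit can
 * be rolled back if any step fails.
 */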
static int __f2fs_commit_atomic_write(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inode *cow_inode = fi->cow_inode;
	struct revoke_entry *new;
	struct list_head revoke_list;
	block_t blkaddr;
	struct dnode_of_data dn;
	pgoff_t len = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t off = 0, blen, index;
	int ret = 0, i;

	INIT_LIST_HEAD(&revoke_list);

	while (len) {
		blen = min_t(pgoff_t, ADDRS_PER_BLOCK(cow_inode), len);

		set_new_dnode(&dn, cow_inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
		if (ret && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			ret = 0;
			if (dn.max_level == 0)
				goto out;
			goto next;
		}

		blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, cow_inode),
				len);
		index = off;
		for (i = 0; i < blen; i++, dn.ofs_in_node++, index++) {
			blkaddr = f2fs_data_blkaddr(&dn);

			if (!__is_valid_data_blkaddr(blkaddr)) {
				continue;
			} else if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				ret = -EFSCORRUPTED;
				f2fs_handle_error(sbi,
						ERROR_INVALID_BLKADDR);
				goto out;
			}

			new = f2fs_kmem_cache_alloc(revoke_entry_slab, GFP_NOFS,
							true, NULL);

			ret = __replace_atomic_write_block(inode, index, blkaddr,
							&new->old_addr, false);
			if (ret) {
				f2fs_put_dnode(&dn);
				kmem_cache_free(revoke_entry_slab, new);
				goto out;
			}

			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			new->index = index;
			list_add_tail(&new->list, &revoke_list);
		}
		f2fs_put_dnode(&dn);
next:
		off += blen;
		len -= blen;
	}

out:
	if (ret) {
		sbi->revoked_atomic_block += fi->atomic_write_cnt;
	} else {
		sbi->committed_atomic_block += fi->atomic_write_cnt;
		set_inode_flag(inode, FI_ATOMIC_COMMITTED);
		if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
			clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
			f2fs_mark_inode_dirty_sync(inode, true);
		}
	}

	__complete_revoke_list(inode, &revoke_list, ret ? true : false);

	return ret;
}

int f2fs_commit_atomic_write(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (err)
		return err;

	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
	f2fs_lock_op(sbi);

	err = __f2fs_commit_atomic_write(inode);

	f2fs_unlock_op(sbi);
	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);

	return err;
}

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
	if (time_to_inject(sbi, FAULT_CHECKPOINT))
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT);

	/* balance_fs_bg is able to be pending */
	if (need && excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi, false);

	if (!f2fs_is_checkpoint_ready(sbi))
		return;

	/*
	 * We should do GC or end up with checkpoint, if there are too many
	 * dirty dir/node pages without enough free segments.
	 */
	if (has_enough_free_secs(sbi, 0, 0))
		return;

	if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
				sbi->gc_thread->f2fs_gc_task) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
					TASK_UNINTERRUPTIBLE);
		wake_up(&sbi->gc_thread->gc_wait_queue_head);
		io_schedule();
		finish_wait(&sbi->gc_thread->fggc_wq, &wait);
	} else {
		struct f2fs_gc_control gc_control = {
			.victim_segno = NULL_SEGNO,
			.init_gc_type = BG_GC,
			.no_bg_gc = true,
			.should_migrate_blocks = false,
			.err_gc_skipped = false,
			.nr_free_secs = 1 };
		f2fs_down_write(&sbi->gc_lock);
		stat_inc_gc_call_count(sbi, FOREGROUND);
		f2fs_gc(sbi, &gc_control);
	}
}

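/*
 * Return true once any single class of dirty pages (dents, quota data,
 * nodes, meta, imeta) or their sum crosses its dirty threshold, which makes
 * f2fs_balance_fs_bg() fall through to a sync.
 */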
static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
{
	int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
	unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
	unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
	unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
	unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
	unsigned int threshold = (factor * DEFAULT_DIRTY_THRESHOLD) <<
				sbi->log_blocks_per_seg;
	unsigned int global_threshold = threshold * 3 / 2;

	if (dents >= threshold || qdata >= threshold ||
		nodes >= threshold || meta >= threshold ||
		imeta >= threshold)
		return true;
	return dents + qdata + nodes + meta + imeta > global_threshold;
}

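/*
 * Background balancing: shrink the extent/NAT/nid caches when memory is
 * tight, and trigger a checkpoint (flushing dirty file inodes first when
 * DATA_FLUSH is set) once dirty metadata, prefree segments or the CP timer
 * say it is due.
 */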
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
{
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return;

	/* try to shrink the read extent cache when there is not enough memory */
	if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE))
		f2fs_shrink_read_extent_tree(sbi,
				READ_EXTENT_CACHE_SHRINK_NUMBER);

	/* try to shrink the age extent cache when there is not enough memory */
	if (!f2fs_available_free_memory(sbi, AGE_EXTENT_CACHE))
		f2fs_shrink_age_extent_tree(sbi,
				AGE_EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
		f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS))
		f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		f2fs_build_free_nids(sbi, false, false);

	if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) ||
		excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi))
		goto do_sync;

	/* there is background inflight IO or foreground operation recently */
	if (is_inflight_io(sbi, REQ_TIME) ||
		(!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem)))
		return;

	/* exceed periodical checkpoint timeout threshold */
	if (f2fs_time_over(sbi, CP_TIME))
		goto do_sync;

	/* checkpoint is the only way to shrink partial cached entries */
	if (f2fs_available_free_memory(sbi, NAT_ENTRIES) &&
		f2fs_available_free_memory(sbi, INO_ENTRIES))
		return;

do_sync:
	if (test_opt(sbi, DATA_FLUSH) && from_bg) {
		struct blk_plug plug;

		mutex_lock(&sbi->flush_lock);

		blk_start_plug(&plug);
		f2fs_sync_dirty_inodes(sbi, FILE_INODE, false);
		blk_finish_plug(&plug);

		mutex_unlock(&sbi->flush_lock);
	}
	stat_inc_cp_call_count(sbi, BACKGROUND);
	f2fs_sync_fs(sbi->sb, 1);
}

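/* Issue a cache flush to @bdev, wait for it, and account the result. */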
static int __submit_flush_wait(struct f2fs_sb_info *sbi,
				struct block_device *bdev)
{
	int ret = blkdev_issue_flush(bdev);

	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
				test_opt(sbi, FLUSH_MERGE), ret);
	if (!ret)
		f2fs_update_iostat(sbi, NULL, FS_FLUSH_IO, 0);
	return ret;
}

static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
{
	int ret = 0;
	int i;

	if (!f2fs_is_multi_device(sbi))
		return __submit_flush_wait(sbi, sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
			continue;
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;
	}
	return ret;
}

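/*
 * Kernel thread that batches queued flush requests: it drains the issue
 * list, submits a single flush on behalf of all waiters and completes them
 * with the shared result.
 */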
static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&fcc->issue_list)) {
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);

		ret = submit_flush_wait(sbi, cmd->ino);
		atomic_inc(&fcc->issued_flush);

		llist_for_each_entry_safe(cmd, next,
					fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		fcc->dispatch_list = NULL;
	}

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}

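/*
 * Issue (or merge) a cache flush for @ino.  Without FLUSH_MERGE, or when the
 * queue is idle, the flush is submitted directly; otherwise the request is
 * queued for the flush thread and the caller waits for its completion.
 */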
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	struct flush_cmd cmd;
	int ret;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE)) {
		atomic_inc(&fcc->queued_flush);
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->queued_flush);
		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	if (atomic_inc_return(&fcc->queued_flush) == 1 ||
			f2fs_is_multi_device(sbi)) {
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->queued_flush);

		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	cmd.ino = ino;
	init_completion(&cmd.wait);

	llist_add(&cmd.llnode, &fcc->issue_list);

	/*
	 * update issue_list before we wake up issue_flush thread, this
	 * smp_mb() pairs with another barrier in ___wait_event(), see
	 * more details in comments of waitqueue_active().
	 */
	smp_mb();

	if (waitqueue_active(&fcc->flush_wait_queue))
		wake_up(&fcc->flush_wait_queue);

	if (fcc->f2fs_issue_flush) {
		wait_for_completion(&cmd.wait);
		atomic_dec(&fcc->queued_flush);
	} else {
		struct llist_node *list;

		list = llist_del_all(&fcc->issue_list);
		if (!list) {
			wait_for_completion(&cmd.wait);
			atomic_dec(&fcc->queued_flush);
		} else {
			struct flush_cmd *tmp, *next;

			ret = submit_flush_wait(sbi, ino);

			llist_for_each_entry_safe(tmp, next, list, llnode) {
				if (tmp == &cmd) {
					cmd.ret = ret;
					atomic_dec(&fcc->queued_flush);
					continue;
				}
				tmp->ret = ret;
				complete(&tmp->wait);
			}
		}
	}

	return cmd.ret;
}

int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;

	if (SM_I(sbi)->fcc_info) {
		fcc = SM_I(sbi)->fcc_info;
		if (fcc->f2fs_issue_flush)
			return 0;
		goto init_thread;
	}

	fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->issued_flush, 0);
	atomic_set(&fcc->queued_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->fcc_info = fcc;
	if (!test_opt(sbi, FLUSH_MERGE))
		return 0;

init_thread:
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		int err = PTR_ERR(fcc->f2fs_issue_flush);

		fcc->f2fs_issue_flush = NULL;
		return err;
	}

	return 0;
}

void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

	if (fcc && fcc->f2fs_issue_flush) {
		struct task_struct *flush_thread = fcc->f2fs_issue_flush;

		fcc->f2fs_issue_flush = NULL;
		kthread_stop(flush_thread);
	}
	if (free) {
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
	}
}

int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
{
	int ret = 0, i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	for (i = 1; i < sbi->s_ndevs; i++) {
		int count = DEFAULT_RETRY_IO_COUNT;

		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
			continue;

		do {
			ret = __submit_flush_wait(sbi, FDEV(i).bdev);
			if (ret)
				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
		} while (ret && --count);

		if (ret) {
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FLUSH_FAIL);
			break;
		}

		spin_lock(&sbi->dev_lock);
		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
		spin_unlock(&sbi->dev_lock);
	}

	return ret;
}

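/*
 * Add @segno to the dirty seglist of @dirty_type.  For DIRTY segments the
 * per-type bitmap and counter are updated as well, and in large-section
 * layouts the owning section is marked in dirty_secmap.
 */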
static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;

		if (__is_large_section(sbi)) {
			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
			block_t valid_blocks =
				get_valid_blocks(sbi, segno, true);

			f2fs_bug_on(sbi, unlikely(!valid_blocks ||
					valid_blocks == CAP_BLKS_PER_SEC(sbi)));

			if (!IS_CURSEC(sbi, secno))
				set_bit(secno, dirty_i->dirty_secmap);
		}
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	block_t valid_blocks;

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		valid_blocks = get_valid_blocks(sbi, segno, true);
		if (valid_blocks == 0) {
			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
						dirty_i->victim_secmap);
#ifdef CONFIG_F2FS_CHECK_FS
			clear_bit(segno, SIT_I(sbi)->invalid_segmap);
#endif
		}
		if (__is_large_section(sbi)) {
			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

			if (!valid_blocks ||
					valid_blocks == CAP_BLKS_PER_SEC(sbi)) {
				clear_bit(secno, dirty_i->dirty_secmap);
				return;
			}

			if (!IS_CURSEC(sbi, secno))
				set_bit(secno, dirty_i->dirty_secmap);
		}
	}
}

/*
 * This must not fail with errors such as -ENOMEM; adding a dirty entry to
 * the seglist is not a critical operation.
 * If the given segment is one of the current working segments, it is not added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks, ckpt_valid_blocks;
	unsigned int usable_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, false);
	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);

	if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
		ckpt_valid_blocks == usable_blocks)) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < usable_blocks) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

/* This moves currently empty dirty blocks to prefree. Must hold seglist_lock */
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		if (get_valid_blocks(sbi, segno, false))
			continue;
		if (IS_CURSEG(sbi, segno))
			continue;
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

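/*
 * Count the blocks held by dirty segments that are no longer valid ("holes")
 * and return how far the larger of the data/node holes exceeds the
 * overprovisioned space; such blocks cannot be reused while checkpointing is
 * disabled.
 */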
block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
{
	int ovp_hole_segs =
		(overprovision_segments(sbi) - reserved_segments(sbi));
	block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	block_t holes[2] = {0, 0};	/* DATA and NODE */
	block_t unusable;
	struct seg_entry *se;
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		se = get_seg_entry(sbi, segno);
		if (IS_NODESEG(se->type))
			holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
							se->valid_blocks;
		else
			holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
							se->valid_blocks;
	}
	mutex_unlock(&dirty_i->seglist_lock);

	unusable = max(holes[DATA], holes[NODE]);
	if (unusable > ovp_holes)
		return unusable - ovp_holes;
	return 0;
}

int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
{
	int ovp_hole_segs =
		(overprovision_segments(sbi) - reserved_segments(sbi));
	if (unusable > F2FS_OPTION(sbi).unusable_cap)
		return -EAGAIN;
	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
		dirty_segments(sbi) > ovp_hole_segs)
		return -EAGAIN;
	return 0;
}

/* This is only used by SBI_CP_DISABLED */
static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = 0;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		if (get_valid_blocks(sbi, segno, false))
			continue;
		if (get_ckpt_valid_blocks(sbi, segno, false))
			continue;
		mutex_unlock(&dirty_i->seglist_lock);
		return segno;
	}
	mutex_unlock(&dirty_i->seglist_lock);
	return NULL_SEGNO;
}

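/*
 * Allocate and initialize a discard command for @len blocks starting at
 * @start on @bdev, queue it on the pending list matching its length, and
 * account the blocks as undiscarded.
 */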
static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t lstart,
		block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc;

	f2fs_bug_on(sbi, !len);

	pend_list = &dcc->pend_list[plist_idx(len)];

	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL);
	INIT_LIST_HEAD(&dc->list);
	dc->bdev = bdev;
	dc->di.lstart = lstart;
	dc->di.start = start;
	dc->di.len = len;
	dc->ref = 0;
	dc->state = D_PREP;
	dc->queued = 0;
	dc->error = 0;
	init_completion(&dc->wait);
	list_add_tail(&dc->list, pend_list);
	spin_lock_init(&dc->lock);
	dc->bio_ref = 0;
	atomic_inc(&dcc->discard_cmd_cnt);
	dcc->undiscard_blks += len;

	return dc;
}

static bool f2fs_check_discard_tree(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node *cur = rb_first_cached(&dcc->root), *next;
	struct discard_cmd *cur_dc, *next_dc;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_dc = rb_entry(cur, struct discard_cmd, rb_node);
		next_dc = rb_entry(next, struct discard_cmd, rb_node);

		if (cur_dc->di.lstart + cur_dc->di.len > next_dc->di.lstart) {
			f2fs_info(sbi, "broken discard_rbtree, "
				"cur(%u, %u) next(%u, %u)",
				cur_dc->di.lstart, cur_dc->di.len,
				next_dc->di.lstart, next_dc->di.len);
			return false;
		}
		cur = next;
	}
#endif
	return true;
}

static struct discard_cmd *__lookup_discard_cmd(struct f2fs_sb_info *sbi,
						block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node *node = dcc->root.rb_root.rb_node;
	struct discard_cmd *dc;

	while (node) {
		dc = rb_entry(node, struct discard_cmd, rb_node);

		if (blkaddr < dc->di.lstart)
			node = node->rb_left;
		else if (blkaddr >= dc->di.lstart + dc->di.len)
			node = node->rb_right;
		else
			return dc;
	}
	return NULL;
}

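/*
 * Search the discard rbtree for the command covering @blkaddr.  On a miss,
 * return NULL and report the would-be insertion point plus the neighbouring
 * commands; on a hit, also return the previous and next commands so the
 * caller can merge with them.
 */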
static struct discard_cmd *__lookup_discard_cmd_ret(struct rb_root_cached *root,
				block_t blkaddr,
				struct discard_cmd **prev_entry,
				struct discard_cmd **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent)
{
	struct rb_node **pnode = &root->rb_root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct discard_cmd *dc;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(&root->rb_root))
		return NULL;

	while (*pnode) {
		parent = *pnode;
		dc = rb_entry(*pnode, struct discard_cmd, rb_node);

		if (blkaddr < dc->di.lstart)
			pnode = &(*pnode)->rb_left;
		else if (blkaddr >= dc->di.lstart + dc->di.len)
			pnode = &(*pnode)->rb_right;
		else
			goto lookup_neighbors;
	}

	*insert_p = pnode;
	*insert_parent = parent;

	dc = rb_entry(parent, struct discard_cmd, rb_node);
	tmp_node = parent;
	if (parent && blkaddr > dc->di.lstart)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);

	tmp_node = parent;
	if (parent && blkaddr < dc->di.lstart)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
	return NULL;

lookup_neighbors:
	/* lookup prev node for merging backward later */
	tmp_node = rb_prev(&dc->rb_node);
	*prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);

	/* lookup next node for merging frontward later */
	tmp_node = rb_next(&dc->rb_node);
	*next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
	return dc;
}

static void __detach_discard_cmd(struct discard_cmd_control *dcc,
							struct discard_cmd *dc)
{
	if (dc->state == D_DONE)
		atomic_sub(dc->queued, &dcc->queued_discard);

	list_del(&dc->list);
	rb_erase_cached(&dc->rb_node, &dcc->root);
	dcc->undiscard_blks -= dc->di.len;

	kmem_cache_free(discard_cmd_slab, dc);

	atomic_dec(&dcc->discard_cmd_cnt);
}

static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned long flags;

	trace_f2fs_remove_discard(dc->bdev, dc->di.start, dc->di.len);

	spin_lock_irqsave(&dc->lock, flags);
	if (dc->bio_ref) {
		spin_unlock_irqrestore(&dc->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dc->lock, flags);

	f2fs_bug_on(sbi, dc->ref);

	if (dc->error == -EOPNOTSUPP)
		dc->error = 0;

	if (dc->error)
		f2fs_info_ratelimited(sbi,
			"Issue discard(%u, %u, %u) failed, ret: %d",
			dc->di.lstart, dc->di.start, dc->di.len, dc->error);
	__detach_discard_cmd(dcc, dc);
}

static void f2fs_submit_discard_endio(struct bio *bio)
{
	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dc->lock, flags);
	if (!dc->error)
		dc->error = blk_status_to_errno(bio->bi_status);
	dc->bio_ref--;
	if (!dc->bio_ref && dc->state == D_SUBMIT) {
		dc->state = D_DONE;
		complete_all(&dc->wait);
	}
	spin_unlock_irqrestore(&dc->lock, flags);
	bio_put(bio);
}

static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
				block_t start, block_t end)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct seg_entry *sentry;
	unsigned int segno;
	block_t blk = start;
	unsigned long offset, size, *map;

	while (blk < end) {
		segno = GET_SEGNO(sbi, blk);
		sentry = get_seg_entry(sbi, segno);
		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);

		if (end < START_BLOCK(sbi, segno + 1))
			size = GET_BLKOFF_FROM_SEG0(sbi, end);
		else
			size = BLKS_PER_SEG(sbi);
		map = (unsigned long *)(sentry->cur_valid_map);
		offset = __find_rev_next_bit(map, size, offset);
		f2fs_bug_on(sbi, offset != size);
		blk = START_BLOCK(sbi, segno + 1);
	}
#endif
}

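/*
 * Fill a discard_policy according to @discard_type: background, force,
 * fstrim and umount policies differ in granularity, issue intervals and
 * whether in-flight I/O is taken into account.
 */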
__init_discard_policy(struct f2fs_sb_info * sbi,struct discard_policy * dpolicy,int discard_type,unsigned int granularity)11638bb4f253SJaegeuk Kim static void __init_discard_policy(struct f2fs_sb_info *sbi,
11648bb4f253SJaegeuk Kim struct discard_policy *dpolicy,
11658bb4f253SJaegeuk Kim int discard_type, unsigned int granularity)
11668bb4f253SJaegeuk Kim {
1167c35b8d5eSSahitya Tummala struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1168c35b8d5eSSahitya Tummala
11698bb4f253SJaegeuk Kim /* common policy */
11708bb4f253SJaegeuk Kim dpolicy->type = discard_type;
11718bb4f253SJaegeuk Kim dpolicy->sync = true;
117220ee4382SChao Yu dpolicy->ordered = false;
11738bb4f253SJaegeuk Kim dpolicy->granularity = granularity;
11748bb4f253SJaegeuk Kim
1175d2d8e896SKonstantin Vyshetsky dpolicy->max_requests = dcc->max_discard_request;
1176120e0ea1SYangtao Li dpolicy->io_aware_gran = dcc->discard_io_aware_gran;
11776ce48b0cSChao Yu dpolicy->timeout = false;
11788bb4f253SJaegeuk Kim
11798bb4f253SJaegeuk Kim if (discard_type == DPOLICY_BG) {
1180d2d8e896SKonstantin Vyshetsky dpolicy->min_interval = dcc->min_discard_issue_time;
1181d2d8e896SKonstantin Vyshetsky dpolicy->mid_interval = dcc->mid_discard_issue_time;
1182d2d8e896SKonstantin Vyshetsky dpolicy->max_interval = dcc->max_discard_issue_time;
11838bb4f253SJaegeuk Kim dpolicy->io_aware = true;
1184cba60849SChao Yu dpolicy->sync = false;
118520ee4382SChao Yu dpolicy->ordered = true;
11868a47d228SYangtao Li if (utilization(sbi) > dcc->discard_urgent_util) {
11871cd2e6d5SYangtao Li dpolicy->granularity = MIN_DISCARD_GRANULARITY;
1188c35b8d5eSSahitya Tummala if (atomic_read(&dcc->discard_cmd_cnt))
1189c35b8d5eSSahitya Tummala dpolicy->max_interval =
1190d2d8e896SKonstantin Vyshetsky dcc->min_discard_issue_time;
11918bb4f253SJaegeuk Kim }
11928bb4f253SJaegeuk Kim } else if (discard_type == DPOLICY_FORCE) {
1193d2d8e896SKonstantin Vyshetsky dpolicy->min_interval = dcc->min_discard_issue_time;
1194d2d8e896SKonstantin Vyshetsky dpolicy->mid_interval = dcc->mid_discard_issue_time;
1195d2d8e896SKonstantin Vyshetsky dpolicy->max_interval = dcc->max_discard_issue_time;
11968bb4f253SJaegeuk Kim dpolicy->io_aware = false;
11978bb4f253SJaegeuk Kim } else if (discard_type == DPOLICY_FSTRIM) {
11988bb4f253SJaegeuk Kim dpolicy->io_aware = false;
11998bb4f253SJaegeuk Kim } else if (discard_type == DPOLICY_UMOUNT) {
12008bb4f253SJaegeuk Kim dpolicy->io_aware = false;
1201b8623253SJaegeuk Kim /* we need to issue all pending discards to keep CP_TRIMMED_FLAG */
12021cd2e6d5SYangtao Li dpolicy->granularity = MIN_DISCARD_GRANULARITY;
12036ce48b0cSChao Yu dpolicy->timeout = true;
12048bb4f253SJaegeuk Kim }
12058bb4f253SJaegeuk Kim }
12068bb4f253SJaegeuk Kim
120735ec7d57SChao Yu static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
120835ec7d57SChao Yu struct block_device *bdev, block_t lstart,
120935ec7d57SChao Yu block_t start, block_t len);
121025f90805SDaejun Park
121125f90805SDaejun Park #ifdef CONFIG_BLK_DEV_ZONED
__submit_zone_reset_cmd(struct f2fs_sb_info * sbi,struct discard_cmd * dc,blk_opf_t flag,struct list_head * wait_list,unsigned int * issued)121225f90805SDaejun Park static void __submit_zone_reset_cmd(struct f2fs_sb_info *sbi,
121325f90805SDaejun Park struct discard_cmd *dc, blk_opf_t flag,
121425f90805SDaejun Park struct list_head *wait_list,
121525f90805SDaejun Park unsigned int *issued)
121625f90805SDaejun Park {
121725f90805SDaejun Park struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
121825f90805SDaejun Park struct block_device *bdev = dc->bdev;
121925f90805SDaejun Park struct bio *bio = bio_alloc(bdev, 0, REQ_OP_ZONE_RESET | flag, GFP_NOFS);
122025f90805SDaejun Park unsigned long flags;
122125f90805SDaejun Park
122225f90805SDaejun Park trace_f2fs_issue_reset_zone(bdev, dc->di.start);
122325f90805SDaejun Park
122425f90805SDaejun Park spin_lock_irqsave(&dc->lock, flags);
122525f90805SDaejun Park dc->state = D_SUBMIT;
122625f90805SDaejun Park dc->bio_ref++;
122725f90805SDaejun Park spin_unlock_irqrestore(&dc->lock, flags);
122825f90805SDaejun Park
122925f90805SDaejun Park if (issued)
123025f90805SDaejun Park (*issued)++;
123125f90805SDaejun Park
123225f90805SDaejun Park atomic_inc(&dcc->queued_discard);
123325f90805SDaejun Park dc->queued++;
123425f90805SDaejun Park list_move_tail(&dc->list, wait_list);
123525f90805SDaejun Park
123625f90805SDaejun Park /* sanity check on discard range */
123725f90805SDaejun Park __check_sit_bitmap(sbi, dc->di.lstart, dc->di.lstart + dc->di.len);
123825f90805SDaejun Park
123925f90805SDaejun Park bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(dc->di.start);
124025f90805SDaejun Park bio->bi_private = dc;
124125f90805SDaejun Park bio->bi_end_io = f2fs_submit_discard_endio;
124225f90805SDaejun Park submit_bio(bio);
124325f90805SDaejun Park
124425f90805SDaejun Park atomic_inc(&dcc->issued_discard);
124525f90805SDaejun Park f2fs_update_iostat(sbi, NULL, FS_ZONE_RESET_IO, dc->di.len * F2FS_BLKSIZE);
124625f90805SDaejun Park }
124725f90805SDaejun Park #endif
124825f90805SDaejun Park
1249c81abe34SJaegeuk Kim /* this function is copied from blkdev_issue_discard() in block/blk-lib.c */
__submit_discard_cmd(struct f2fs_sb_info * sbi,struct discard_policy * dpolicy,struct discard_cmd * dc,int * issued)12506b9cb124SChao Yu static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
125178997b56SChao Yu struct discard_policy *dpolicy,
1252185a453bSYuwei Guan struct discard_cmd *dc, int *issued)
1253c81abe34SJaegeuk Kim {
125435ec7d57SChao Yu struct block_device *bdev = dc->bdev;
125535ec7d57SChao Yu unsigned int max_discard_blocks =
1256cf0fbf89SChristoph Hellwig SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
1257c81abe34SJaegeuk Kim struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
125878997b56SChao Yu struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
125978997b56SChao Yu &(dcc->fstrim_list) : &(dcc->wait_list);
12607649c873SBart Van Assche blk_opf_t flag = dpolicy->sync ? REQ_SYNC : 0;
126135ec7d57SChao Yu block_t lstart, start, len, total_len;
126235ec7d57SChao Yu int err = 0;
1263c81abe34SJaegeuk Kim
1264c81abe34SJaegeuk Kim if (dc->state != D_PREP)
12656b9cb124SChao Yu return 0;
1266c81abe34SJaegeuk Kim
1267d6184774SYunlei He if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
12686b9cb124SChao Yu return 0;
1269d6184774SYunlei He
127025f90805SDaejun Park #ifdef CONFIG_BLK_DEV_ZONED
127125f90805SDaejun Park if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) {
12723cb88bc1SShin'ichiro Kawasaki int devi = f2fs_bdev_index(sbi, bdev);
12733cb88bc1SShin'ichiro Kawasaki
12743cb88bc1SShin'ichiro Kawasaki if (devi < 0)
12753cb88bc1SShin'ichiro Kawasaki return -EINVAL;
12763cb88bc1SShin'ichiro Kawasaki
12773cb88bc1SShin'ichiro Kawasaki if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
12783cb88bc1SShin'ichiro Kawasaki __submit_zone_reset_cmd(sbi, dc, flag,
12793cb88bc1SShin'ichiro Kawasaki wait_list, issued);
128025f90805SDaejun Park return 0;
128125f90805SDaejun Park }
12823cb88bc1SShin'ichiro Kawasaki }
128325f90805SDaejun Park #endif
128425f90805SDaejun Park
1285*ed24ab98SChao Yu /*
1286*ed24ab98SChao Yu  * stop issuing discard in any of the cases below:
1287*ed24ab98SChao Yu  * 1. the device is a conventional zone that doesn't support discard.
1288*ed24ab98SChao Yu  * 2. the device is a regular device that no longer supports discard
1289*ed24ab98SChao Yu  *    after a snapshot.
1290*ed24ab98SChao Yu */
1291*ed24ab98SChao Yu if (!bdev_max_discard_sectors(bdev))
1292*ed24ab98SChao Yu return -EOPNOTSUPP;
1293*ed24ab98SChao Yu
1294f69475ddSJaegeuk Kim trace_f2fs_issue_discard(bdev, dc->di.start, dc->di.len);
12950243a5f9SChao Yu
1296f69475ddSJaegeuk Kim lstart = dc->di.lstart;
1297f69475ddSJaegeuk Kim start = dc->di.start;
1298f69475ddSJaegeuk Kim len = dc->di.len;
129935ec7d57SChao Yu total_len = len;
130035ec7d57SChao Yu
1301f69475ddSJaegeuk Kim dc->di.len = 0;
130235ec7d57SChao Yu
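/*
 * Issue the range in chunks of at most max_discard_blocks.  Each chunk
 * gets its own bio and bumps bio_ref; the command stays in D_PARTIAL
 * until the final chunk (or the last request allowed by the policy)
 * moves it to D_SUBMIT.
 */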
130335ec7d57SChao Yu while (total_len && *issued < dpolicy->max_requests && !err) {
130435ec7d57SChao Yu struct bio *bio = NULL;
130535ec7d57SChao Yu unsigned long flags;
130635ec7d57SChao Yu bool last = true;
130735ec7d57SChao Yu
130835ec7d57SChao Yu if (len > max_discard_blocks) {
130935ec7d57SChao Yu len = max_discard_blocks;
131035ec7d57SChao Yu last = false;
131135ec7d57SChao Yu }
131235ec7d57SChao Yu
131335ec7d57SChao Yu (*issued)++;
131435ec7d57SChao Yu if (*issued == dpolicy->max_requests)
131535ec7d57SChao Yu last = true;
131635ec7d57SChao Yu
1317f69475ddSJaegeuk Kim dc->di.len += len;
131835ec7d57SChao Yu
1319b83dcfe6SChao Yu if (time_to_inject(sbi, FAULT_DISCARD)) {
1320b83dcfe6SChao Yu err = -EIO;
13215b7b74b7SYangtao Li } else {
132235ec7d57SChao Yu err = __blkdev_issue_discard(bdev,
132335ec7d57SChao Yu SECTOR_FROM_BLOCK(start),
132435ec7d57SChao Yu SECTOR_FROM_BLOCK(len),
132544abff2cSChristoph Hellwig GFP_NOFS, &bio);
13265b7b74b7SYangtao Li }
13276b9cb124SChao Yu if (err) {
13286b9cb124SChao Yu spin_lock_irqsave(&dc->lock, flags);
13296b9cb124SChao Yu if (dc->state == D_PARTIAL)
13306b9cb124SChao Yu dc->state = D_SUBMIT;
13316b9cb124SChao Yu spin_unlock_irqrestore(&dc->lock, flags);
13326b9cb124SChao Yu
13336b9cb124SChao Yu break;
13346b9cb124SChao Yu }
13356b9cb124SChao Yu
13366b9cb124SChao Yu f2fs_bug_on(sbi, !bio);
13376b9cb124SChao Yu
133835ec7d57SChao Yu /*
133935ec7d57SChao Yu * the state must be updated before submission so the endio callback
134035ec7d57SChao Yu * cannot mark the command D_DONE right away
134135ec7d57SChao Yu */
134235ec7d57SChao Yu spin_lock_irqsave(&dc->lock, flags);
134335ec7d57SChao Yu if (last)
1344c81abe34SJaegeuk Kim dc->state = D_SUBMIT;
134535ec7d57SChao Yu else
134635ec7d57SChao Yu dc->state = D_PARTIAL;
134735ec7d57SChao Yu dc->bio_ref++;
134835ec7d57SChao Yu spin_unlock_irqrestore(&dc->lock, flags);
134935ec7d57SChao Yu
135072691af6SJaegeuk Kim atomic_inc(&dcc->queued_discard);
135172691af6SJaegeuk Kim dc->queued++;
135235ec7d57SChao Yu list_move_tail(&dc->list, wait_list);
135335ec7d57SChao Yu
135435ec7d57SChao Yu /* sanity check on discard range */
13559249ddedSQiuyang Sun __check_sit_bitmap(sbi, lstart, lstart + len);
135635ec7d57SChao Yu
1357c81abe34SJaegeuk Kim bio->bi_private = dc;
1358c81abe34SJaegeuk Kim bio->bi_end_io = f2fs_submit_discard_endio;
1359ecc9aa00SChao Yu bio->bi_opf |= flag;
1360c81abe34SJaegeuk Kim submit_bio(bio);
136135ec7d57SChao Yu
136235ec7d57SChao Yu atomic_inc(&dcc->issued_discard);
1363b0af6d49SChao Yu
13647a2b15cfSYangtao Li f2fs_update_iostat(sbi, NULL, FS_DISCARD_IO, len * F2FS_BLKSIZE);
136535ec7d57SChao Yu
136635ec7d57SChao Yu lstart += len;
136735ec7d57SChao Yu start += len;
136835ec7d57SChao Yu total_len -= len;
136935ec7d57SChao Yu len = total_len;
137035ec7d57SChao Yu }
137135ec7d57SChao Yu
1372df423399SSahitya Tummala if (!err && len) {
1373df423399SSahitya Tummala dcc->undiscard_blks -= len;
137435ec7d57SChao Yu __update_discard_tree_range(sbi, bdev, lstart, start, len);
1375df423399SSahitya Tummala }
13766b9cb124SChao Yu return err;
1377c81abe34SJaegeuk Kim }
1378c81abe34SJaegeuk Kim
__insert_discard_cmd(struct f2fs_sb_info * sbi,struct block_device * bdev,block_t lstart,block_t start,block_t len)1379f69475ddSJaegeuk Kim static void __insert_discard_cmd(struct f2fs_sb_info *sbi,
1380004b6862SChao Yu struct block_device *bdev, block_t lstart,
1381f69475ddSJaegeuk Kim block_t start, block_t len)
1382004b6862SChao Yu {
1383004b6862SChao Yu struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1384f69475ddSJaegeuk Kim struct rb_node **p = &dcc->root.rb_root.rb_node;
1385004b6862SChao Yu struct rb_node *parent = NULL;
1386f69475ddSJaegeuk Kim struct discard_cmd *dc;
13874dada3fdSChao Yu bool leftmost = true;
1388004b6862SChao Yu
1389f69475ddSJaegeuk Kim /* look up rb tree to find parent node */
1390f69475ddSJaegeuk Kim while (*p) {
1391f69475ddSJaegeuk Kim parent = *p;
1392f69475ddSJaegeuk Kim dc = rb_entry(parent, struct discard_cmd, rb_node);
1393f69475ddSJaegeuk Kim
1394f69475ddSJaegeuk Kim if (lstart < dc->di.lstart) {
1395f69475ddSJaegeuk Kim p = &(*p)->rb_left;
1396f69475ddSJaegeuk Kim } else if (lstart >= dc->di.lstart + dc->di.len) {
1397f69475ddSJaegeuk Kim p = &(*p)->rb_right;
1398f69475ddSJaegeuk Kim leftmost = false;
1399f69475ddSJaegeuk Kim } else {
1400f69475ddSJaegeuk Kim f2fs_bug_on(sbi, 1);
1401f69475ddSJaegeuk Kim }
1402004b6862SChao Yu }
1403004b6862SChao Yu
1404f69475ddSJaegeuk Kim dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
1405f69475ddSJaegeuk Kim
1406f69475ddSJaegeuk Kim rb_link_node(&dc->rb_node, parent, p);
1407f69475ddSJaegeuk Kim rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
1408004b6862SChao Yu }
1409004b6862SChao Yu
__relocate_discard_cmd(struct discard_cmd_control * dcc,struct discard_cmd * dc)1410ba48a33eSChao Yu static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
1411ba48a33eSChao Yu struct discard_cmd *dc)
1412ba48a33eSChao Yu {
1413f69475ddSJaegeuk Kim list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->di.len)]);
1414ba48a33eSChao Yu }
1415ba48a33eSChao Yu
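/*
 * A block at @blkaddr is being reused, so carve it out of the pending
 * discard command that covers it: keep the part before it, keep the part
 * after it as a shortened or newly inserted command, and drop the command
 * entirely if it is already done or only one block long.
 */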
__punch_discard_cmd(struct f2fs_sb_info * sbi,struct discard_cmd * dc,block_t blkaddr)1416004b6862SChao Yu static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
1417004b6862SChao Yu struct discard_cmd *dc, block_t blkaddr)
1418004b6862SChao Yu {
1419ba48a33eSChao Yu struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1420004b6862SChao Yu struct discard_info di = dc->di;
1421004b6862SChao Yu bool modified = false;
1422004b6862SChao Yu
1423f69475ddSJaegeuk Kim if (dc->state == D_DONE || dc->di.len == 1) {
1424004b6862SChao Yu __remove_discard_cmd(sbi, dc);
1425004b6862SChao Yu return;
1426004b6862SChao Yu }
1427004b6862SChao Yu
1428d84d1cbdSChao Yu dcc->undiscard_blks -= di.len;
1429d84d1cbdSChao Yu
1430004b6862SChao Yu if (blkaddr > di.lstart) {
1431f69475ddSJaegeuk Kim dc->di.len = blkaddr - dc->di.lstart;
1432f69475ddSJaegeuk Kim dcc->undiscard_blks += dc->di.len;
1433ba48a33eSChao Yu __relocate_discard_cmd(dcc, dc);
1434004b6862SChao Yu modified = true;
1435004b6862SChao Yu }
1436004b6862SChao Yu
1437004b6862SChao Yu if (blkaddr < di.lstart + di.len - 1) {
1438004b6862SChao Yu if (modified) {
1439f69475ddSJaegeuk Kim __insert_discard_cmd(sbi, dc->bdev, blkaddr + 1,
1440004b6862SChao Yu di.start + blkaddr + 1 - di.lstart,
1441f69475ddSJaegeuk Kim di.lstart + di.len - 1 - blkaddr);
1442004b6862SChao Yu } else {
1443f69475ddSJaegeuk Kim dc->di.lstart++;
1444f69475ddSJaegeuk Kim dc->di.len--;
1445f69475ddSJaegeuk Kim dc->di.start++;
1446f69475ddSJaegeuk Kim dcc->undiscard_blks += dc->di.len;
1447ba48a33eSChao Yu __relocate_discard_cmd(dcc, dc);
1448004b6862SChao Yu }
1449004b6862SChao Yu }
1450004b6862SChao Yu }
1451004b6862SChao Yu
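/*
 * Add [lstart, lstart + len) to the discard rb-tree: pieces adjacent to
 * an existing D_PREP command on the same device are merged into it (up to
 * max_discard_blocks), and anything left over is inserted as a new
 * command.
 */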
__update_discard_tree_range(struct f2fs_sb_info * sbi,struct block_device * bdev,block_t lstart,block_t start,block_t len)1452004b6862SChao Yu static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1453004b6862SChao Yu struct block_device *bdev, block_t lstart,
1454004b6862SChao Yu block_t start, block_t len)
1455004b6862SChao Yu {
1456004b6862SChao Yu struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1457004b6862SChao Yu struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1458004b6862SChao Yu struct discard_cmd *dc;
1459004b6862SChao Yu struct discard_info di = {0};
1460004b6862SChao Yu struct rb_node **insert_p = NULL, *insert_parent = NULL;
146135ec7d57SChao Yu unsigned int max_discard_blocks =
1462cf0fbf89SChristoph Hellwig SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
1463004b6862SChao Yu block_t end = lstart + len;
1464004b6862SChao Yu
1465f69475ddSJaegeuk Kim dc = __lookup_discard_cmd_ret(&dcc->root, lstart,
1466f69475ddSJaegeuk Kim &prev_dc, &next_dc, &insert_p, &insert_parent);
1467004b6862SChao Yu if (dc)
1468004b6862SChao Yu prev_dc = dc;
1469004b6862SChao Yu
1470004b6862SChao Yu if (!prev_dc) {
1471004b6862SChao Yu di.lstart = lstart;
1472f69475ddSJaegeuk Kim di.len = next_dc ? next_dc->di.lstart - lstart : len;
1473004b6862SChao Yu di.len = min(di.len, len);
1474004b6862SChao Yu di.start = start;
1475004b6862SChao Yu }
1476004b6862SChao Yu
1477004b6862SChao Yu while (1) {
1478004b6862SChao Yu struct rb_node *node;
1479004b6862SChao Yu bool merged = false;
1480004b6862SChao Yu struct discard_cmd *tdc = NULL;
1481004b6862SChao Yu
1482004b6862SChao Yu if (prev_dc) {
1483f69475ddSJaegeuk Kim di.lstart = prev_dc->di.lstart + prev_dc->di.len;
1484004b6862SChao Yu if (di.lstart < lstart)
1485004b6862SChao Yu di.lstart = lstart;
1486004b6862SChao Yu if (di.lstart >= end)
1487004b6862SChao Yu break;
1488004b6862SChao Yu
1489f69475ddSJaegeuk Kim if (!next_dc || next_dc->di.lstart > end)
1490004b6862SChao Yu di.len = end - di.lstart;
1491004b6862SChao Yu else
1492f69475ddSJaegeuk Kim di.len = next_dc->di.lstart - di.lstart;
1493004b6862SChao Yu di.start = start + di.lstart - lstart;
1494004b6862SChao Yu }
1495004b6862SChao Yu
1496004b6862SChao Yu if (!di.len)
1497004b6862SChao Yu goto next;
1498004b6862SChao Yu
1499004b6862SChao Yu if (prev_dc && prev_dc->state == D_PREP &&
1500004b6862SChao Yu prev_dc->bdev == bdev &&
150135ec7d57SChao Yu __is_discard_back_mergeable(&di, &prev_dc->di,
150235ec7d57SChao Yu max_discard_blocks)) {
1503004b6862SChao Yu prev_dc->di.len += di.len;
1504d84d1cbdSChao Yu dcc->undiscard_blks += di.len;
1505ba48a33eSChao Yu __relocate_discard_cmd(dcc, prev_dc);
1506004b6862SChao Yu di = prev_dc->di;
1507004b6862SChao Yu tdc = prev_dc;
1508004b6862SChao Yu merged = true;
1509004b6862SChao Yu }
1510004b6862SChao Yu
1511004b6862SChao Yu if (next_dc && next_dc->state == D_PREP &&
1512004b6862SChao Yu next_dc->bdev == bdev &&
151335ec7d57SChao Yu __is_discard_front_mergeable(&di, &next_dc->di,
151435ec7d57SChao Yu max_discard_blocks)) {
1515004b6862SChao Yu next_dc->di.lstart = di.lstart;
1516004b6862SChao Yu next_dc->di.len += di.len;
1517004b6862SChao Yu next_dc->di.start = di.start;
1518d84d1cbdSChao Yu dcc->undiscard_blks += di.len;
1519ba48a33eSChao Yu __relocate_discard_cmd(dcc, next_dc);
1520004b6862SChao Yu if (tdc)
1521004b6862SChao Yu __remove_discard_cmd(sbi, tdc);
1522004b6862SChao Yu merged = true;
1523004b6862SChao Yu }
1524004b6862SChao Yu
1525f69475ddSJaegeuk Kim if (!merged)
1526f69475ddSJaegeuk Kim __insert_discard_cmd(sbi, bdev,
1527f69475ddSJaegeuk Kim di.lstart, di.start, di.len);
1528004b6862SChao Yu next:
1529004b6862SChao Yu prev_dc = next_dc;
1530004b6862SChao Yu if (!prev_dc)
1531004b6862SChao Yu break;
1532004b6862SChao Yu
1533004b6862SChao Yu node = rb_next(&prev_dc->rb_node);
1534004b6862SChao Yu next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1535004b6862SChao Yu }
1536004b6862SChao Yu }
1537004b6862SChao Yu
153825f90805SDaejun Park #ifdef CONFIG_BLK_DEV_ZONED
__queue_zone_reset_cmd(struct f2fs_sb_info * sbi,struct block_device * bdev,block_t blkstart,block_t lblkstart,block_t blklen)153925f90805SDaejun Park static void __queue_zone_reset_cmd(struct f2fs_sb_info *sbi,
154025f90805SDaejun Park struct block_device *bdev, block_t blkstart, block_t lblkstart,
154125f90805SDaejun Park block_t blklen)
154225f90805SDaejun Park {
154325f90805SDaejun Park trace_f2fs_queue_reset_zone(bdev, blkstart);
154425f90805SDaejun Park
154525f90805SDaejun Park mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
154625f90805SDaejun Park __insert_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen);
154725f90805SDaejun Park mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
154825f90805SDaejun Park }
154925f90805SDaejun Park #endif
155025f90805SDaejun Park
__queue_discard_cmd(struct f2fs_sb_info * sbi,struct block_device * bdev,block_t blkstart,block_t blklen)155162081639SYangtao Li static void __queue_discard_cmd(struct f2fs_sb_info *sbi,
1552c81abe34SJaegeuk Kim struct block_device *bdev, block_t blkstart, block_t blklen)
1553c81abe34SJaegeuk Kim {
1554c81abe34SJaegeuk Kim block_t lblkstart = blkstart;
1555c81abe34SJaegeuk Kim
15567f3d7719SDamien Le Moal if (!f2fs_bdev_support_discard(bdev))
155762081639SYangtao Li return;
15587f3d7719SDamien Le Moal
15590243a5f9SChao Yu trace_f2fs_queue_discard(bdev, blkstart, blklen);
1560c81abe34SJaegeuk Kim
15610916878dSDamien Le Moal if (f2fs_is_multi_device(sbi)) {
1562c81abe34SJaegeuk Kim int devi = f2fs_target_device_index(sbi, blkstart);
1563c81abe34SJaegeuk Kim
1564c81abe34SJaegeuk Kim blkstart -= FDEV(devi).start_blk;
1565c81abe34SJaegeuk Kim }
156635ec7d57SChao Yu mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
1567004b6862SChao Yu __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
156835ec7d57SChao Yu mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
1569c81abe34SJaegeuk Kim }
1570c81abe34SJaegeuk Kim
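/*
 * Issue prepared discard commands in LBA order, resuming from
 * dcc->next_pos, until the policy's request budget is spent or, for
 * io-aware policies, the device is no longer idle.
 */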
__issue_discard_cmd_orderly(struct f2fs_sb_info * sbi,struct discard_policy * dpolicy,int * issued)1571185a453bSYuwei Guan static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
1572185a453bSYuwei Guan struct discard_policy *dpolicy, int *issued)
157320ee4382SChao Yu {
157420ee4382SChao Yu struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
157520ee4382SChao Yu struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
157620ee4382SChao Yu struct rb_node **insert_p = NULL, *insert_parent = NULL;
157720ee4382SChao Yu struct discard_cmd *dc;
157820ee4382SChao Yu struct blk_plug plug;
157920ee4382SChao Yu bool io_interrupted = false;
158020ee4382SChao Yu
158120ee4382SChao Yu mutex_lock(&dcc->cmd_lock);
1582f69475ddSJaegeuk Kim dc = __lookup_discard_cmd_ret(&dcc->root, dcc->next_pos,
1583f69475ddSJaegeuk Kim &prev_dc, &next_dc, &insert_p, &insert_parent);
158420ee4382SChao Yu if (!dc)
158520ee4382SChao Yu dc = next_dc;
158620ee4382SChao Yu
158720ee4382SChao Yu blk_start_plug(&plug);
158820ee4382SChao Yu
158920ee4382SChao Yu while (dc) {
159020ee4382SChao Yu struct rb_node *node;
15916b9cb124SChao Yu int err = 0;
159220ee4382SChao Yu
159320ee4382SChao Yu if (dc->state != D_PREP)
159420ee4382SChao Yu goto next;
159520ee4382SChao Yu
1596a7d10cf3SSahitya Tummala if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
159720ee4382SChao Yu io_interrupted = true;
159820ee4382SChao Yu break;
159920ee4382SChao Yu }
160020ee4382SChao Yu
1601f69475ddSJaegeuk Kim dcc->next_pos = dc->di.lstart + dc->di.len;
1602185a453bSYuwei Guan err = __submit_discard_cmd(sbi, dpolicy, dc, issued);
160320ee4382SChao Yu
1604185a453bSYuwei Guan if (*issued >= dpolicy->max_requests)
160520ee4382SChao Yu break;
160620ee4382SChao Yu next:
160720ee4382SChao Yu node = rb_next(&dc->rb_node);
16086b9cb124SChao Yu if (err)
16096b9cb124SChao Yu __remove_discard_cmd(sbi, dc);
161020ee4382SChao Yu dc = rb_entry_safe(node, struct discard_cmd, rb_node);
161120ee4382SChao Yu }
161220ee4382SChao Yu
161320ee4382SChao Yu blk_finish_plug(&plug);
161420ee4382SChao Yu
161520ee4382SChao Yu if (!dc)
161620ee4382SChao Yu dcc->next_pos = 0;
161720ee4382SChao Yu
161820ee4382SChao Yu mutex_unlock(&dcc->cmd_lock);
161920ee4382SChao Yu
1620185a453bSYuwei Guan if (!(*issued) && io_interrupted)
1621185a453bSYuwei Guan *issued = -1;
162220ee4382SChao Yu }
1623141af6baSSahitya Tummala static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1624141af6baSSahitya Tummala struct discard_policy *dpolicy);
162520ee4382SChao Yu
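/*
 * Walk the pend_list[] arrays from the largest granularity down to the
 * policy's granularity and submit prepared commands, honoring the
 * policy's io-aware and timeout constraints.  For the umount policy,
 * wait for the issued commands and retry until nothing is left to issue.
 */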
__issue_discard_cmd(struct f2fs_sb_info * sbi,struct discard_policy * dpolicy)162678997b56SChao Yu static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
162778997b56SChao Yu struct discard_policy *dpolicy)
1628bd5b0738SChao Yu {
1629bd5b0738SChao Yu struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1630bd5b0738SChao Yu struct list_head *pend_list;
1631bd5b0738SChao Yu struct discard_cmd *dc, *tmp;
1632bd5b0738SChao Yu struct blk_plug plug;
1633141af6baSSahitya Tummala int i, issued;
1634e6c6de18SChao Yu bool io_interrupted = false;
1635bd5b0738SChao Yu
16366ce48b0cSChao Yu if (dpolicy->timeout)
16376ce48b0cSChao Yu f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);
163803f2c02dSJaegeuk Kim
1639141af6baSSahitya Tummala retry:
1640141af6baSSahitya Tummala issued = 0;
164178997b56SChao Yu for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
16426ce48b0cSChao Yu if (dpolicy->timeout &&
16436ce48b0cSChao Yu f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
164403f2c02dSJaegeuk Kim break;
164503f2c02dSJaegeuk Kim
164678997b56SChao Yu if (i + 1 < dpolicy->granularity)
164778997b56SChao Yu break;
164820ee4382SChao Yu
1649185a453bSYuwei Guan if (i + 1 < dcc->max_ordered_discard && dpolicy->ordered) {
1650185a453bSYuwei Guan __issue_discard_cmd_orderly(sbi, dpolicy, &issued);
1651185a453bSYuwei Guan return issued;
1652185a453bSYuwei Guan }
165320ee4382SChao Yu
1654bd5b0738SChao Yu pend_list = &dcc->pend_list[i];
165533da62cfSChao Yu
165633da62cfSChao Yu mutex_lock(&dcc->cmd_lock);
165749c60c67SChao Yu if (list_empty(pend_list))
165849c60c67SChao Yu goto next;
165967fce70bSChao Yu if (unlikely(dcc->rbtree_check))
1660f69475ddSJaegeuk Kim f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
166133da62cfSChao Yu blk_start_plug(&plug);
1662bd5b0738SChao Yu list_for_each_entry_safe(dc, tmp, pend_list, list) {
1663bd5b0738SChao Yu f2fs_bug_on(sbi, dc->state != D_PREP);
1664bd5b0738SChao Yu
16656ce48b0cSChao Yu if (dpolicy->timeout &&
16666ce48b0cSChao Yu f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
16676e0cd4a9SHeng Xiao break;
16686e0cd4a9SHeng Xiao
1669ecc9aa00SChao Yu if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
1670a7d10cf3SSahitya Tummala !is_idle(sbi, DISCARD_TIME)) {
1671e6c6de18SChao Yu io_interrupted = true;
1672522d1711SChao Yu break;
1673e6c6de18SChao Yu }
1674e6c6de18SChao Yu
167535ec7d57SChao Yu __submit_discard_cmd(sbi, dpolicy, dc, &issued);
1676522d1711SChao Yu
167735ec7d57SChao Yu if (issued >= dpolicy->max_requests)
167833da62cfSChao Yu break;
1679bd5b0738SChao Yu }
1680bd5b0738SChao Yu blk_finish_plug(&plug);
168149c60c67SChao Yu next:
1682bd5b0738SChao Yu mutex_unlock(&dcc->cmd_lock);
1683969d1b18SChao Yu
1684522d1711SChao Yu if (issued >= dpolicy->max_requests || io_interrupted)
168533da62cfSChao Yu break;
168633da62cfSChao Yu }
168733da62cfSChao Yu
1688141af6baSSahitya Tummala if (dpolicy->type == DPOLICY_UMOUNT && issued) {
1689141af6baSSahitya Tummala __wait_all_discard_cmd(sbi, dpolicy);
1690141af6baSSahitya Tummala goto retry;
1691141af6baSSahitya Tummala }
1692141af6baSSahitya Tummala
1693e6c6de18SChao Yu if (!issued && io_interrupted)
1694e6c6de18SChao Yu issued = -1;
1695e6c6de18SChao Yu
1696969d1b18SChao Yu return issued;
1697969d1b18SChao Yu }
1698969d1b18SChao Yu
__drop_discard_cmd(struct f2fs_sb_info * sbi)1699cf5c759fSChao Yu static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
1700969d1b18SChao Yu {
1701969d1b18SChao Yu struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1702969d1b18SChao Yu struct list_head *pend_list;
1703969d1b18SChao Yu struct discard_cmd *dc, *tmp;
1704969d1b18SChao Yu int i;
1705cf5c759fSChao Yu bool dropped = false;
1706969d1b18SChao Yu
1707969d1b18SChao Yu mutex_lock(&dcc->cmd_lock);
1708969d1b18SChao Yu for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1709969d1b18SChao Yu pend_list = &dcc->pend_list[i];
1710969d1b18SChao Yu list_for_each_entry_safe(dc, tmp, pend_list, list) {
1711969d1b18SChao Yu f2fs_bug_on(sbi, dc->state != D_PREP);
1712969d1b18SChao Yu __remove_discard_cmd(sbi, dc);
1713cf5c759fSChao Yu dropped = true;
1714969d1b18SChao Yu }
1715969d1b18SChao Yu }
1716969d1b18SChao Yu mutex_unlock(&dcc->cmd_lock);
1717cf5c759fSChao Yu
1718cf5c759fSChao Yu return dropped;
1719bd5b0738SChao Yu }
1720bd5b0738SChao Yu
f2fs_drop_discard_cmd(struct f2fs_sb_info * sbi)17214d57b86dSChao Yu void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
17227950e9acSChao Yu {
17237950e9acSChao Yu __drop_discard_cmd(sbi);
17247950e9acSChao Yu }
17257950e9acSChao Yu
__wait_one_discard_bio(struct f2fs_sb_info * sbi,struct discard_cmd * dc)17260ea80512SChao Yu static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
17272a510c00SChao Yu struct discard_cmd *dc)
17282a510c00SChao Yu {
17292a510c00SChao Yu struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
17300ea80512SChao Yu unsigned int len = 0;
17312a510c00SChao Yu
17322a510c00SChao Yu wait_for_completion_io(&dc->wait);
17332a510c00SChao Yu mutex_lock(&dcc->cmd_lock);
17342a510c00SChao Yu f2fs_bug_on(sbi, dc->state != D_DONE);
17352a510c00SChao Yu dc->ref--;
17360ea80512SChao Yu if (!dc->ref) {
17370ea80512SChao Yu if (!dc->error)
1738f69475ddSJaegeuk Kim len = dc->di.len;
17392a510c00SChao Yu __remove_discard_cmd(sbi, dc);
17400ea80512SChao Yu }
17412a510c00SChao Yu mutex_unlock(&dcc->cmd_lock);
17420ea80512SChao Yu
17430ea80512SChao Yu return len;
17442a510c00SChao Yu }
17452a510c00SChao Yu
__wait_discard_cmd_range(struct f2fs_sb_info * sbi,struct discard_policy * dpolicy,block_t start,block_t end)17460ea80512SChao Yu static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
174778997b56SChao Yu struct discard_policy *dpolicy,
174878997b56SChao Yu block_t start, block_t end)
174963a94fa1SChao Yu {
175063a94fa1SChao Yu struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
175178997b56SChao Yu struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
175278997b56SChao Yu &(dcc->fstrim_list) : &(dcc->wait_list);
17539e3a845dSJakob Koschel struct discard_cmd *dc = NULL, *iter, *tmp;
17540ea80512SChao Yu unsigned int trimmed = 0;
17556afae633SChao Yu
17566afae633SChao Yu next:
17579e3a845dSJakob Koschel dc = NULL;
175863a94fa1SChao Yu
175963a94fa1SChao Yu mutex_lock(&dcc->cmd_lock);
17609e3a845dSJakob Koschel list_for_each_entry_safe(iter, tmp, wait_list, list) {
1761f69475ddSJaegeuk Kim if (iter->di.lstart + iter->di.len <= start ||
1762f69475ddSJaegeuk Kim end <= iter->di.lstart)
17638412663dSChao Yu continue;
1764f69475ddSJaegeuk Kim if (iter->di.len < dpolicy->granularity)
17658412663dSChao Yu continue;
17669e3a845dSJakob Koschel if (iter->state == D_DONE && !iter->ref) {
17679e3a845dSJakob Koschel wait_for_completion_io(&iter->wait);
17689e3a845dSJakob Koschel if (!iter->error)
1769f69475ddSJaegeuk Kim trimmed += iter->di.len;
17709e3a845dSJakob Koschel __remove_discard_cmd(sbi, iter);
17716afae633SChao Yu } else {
17729e3a845dSJakob Koschel iter->ref++;
17739e3a845dSJakob Koschel dc = iter;
17746afae633SChao Yu break;
177563a94fa1SChao Yu }
177663a94fa1SChao Yu }
177763a94fa1SChao Yu mutex_unlock(&dcc->cmd_lock);
17786afae633SChao Yu
17799e3a845dSJakob Koschel if (dc) {
17800ea80512SChao Yu trimmed += __wait_one_discard_bio(sbi, dc);
17816afae633SChao Yu goto next;
17826afae633SChao Yu }
17830ea80512SChao Yu
17840ea80512SChao Yu return trimmed;
178563a94fa1SChao Yu }
178663a94fa1SChao Yu
__wait_all_discard_cmd(struct f2fs_sb_info * sbi,struct discard_policy * dpolicy)178701f9cf6dSChao Yu static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
178878997b56SChao Yu struct discard_policy *dpolicy)
17898412663dSChao Yu {
17909a997188SJaegeuk Kim struct discard_policy dp;
179101f9cf6dSChao Yu unsigned int discard_blks;
17929a997188SJaegeuk Kim
179301f9cf6dSChao Yu if (dpolicy)
179401f9cf6dSChao Yu return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
17959a997188SJaegeuk Kim
17969a997188SJaegeuk Kim /* wait all */
1797f08142bcSYangtao Li __init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, MIN_DISCARD_GRANULARITY);
179801f9cf6dSChao Yu discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1799f08142bcSYangtao Li __init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, MIN_DISCARD_GRANULARITY);
180001f9cf6dSChao Yu discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
180101f9cf6dSChao Yu
180201f9cf6dSChao Yu return discard_blks;
18038412663dSChao Yu }
18048412663dSChao Yu
18054e6a8d9bSJaegeuk Kim /* This should be covered by global mutex, &sit_i->sentry_lock */
f2fs_wait_discard_bio(struct f2fs_sb_info * sbi,block_t blkaddr)180694b1e10eSWei Yongjun static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
1807275b66b0SChao Yu {
18080b54fb84SJaegeuk Kim struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1809004b6862SChao Yu struct discard_cmd *dc;
1810ec9895adSChao Yu bool need_wait = false;
1811275b66b0SChao Yu
181215469963SJaegeuk Kim mutex_lock(&dcc->cmd_lock);
1813f69475ddSJaegeuk Kim dc = __lookup_discard_cmd(sbi, blkaddr);
181425f90805SDaejun Park #ifdef CONFIG_BLK_DEV_ZONED
181525f90805SDaejun Park if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) {
18163cb88bc1SShin'ichiro Kawasaki int devi = f2fs_bdev_index(sbi, dc->bdev);
18173cb88bc1SShin'ichiro Kawasaki
18183cb88bc1SShin'ichiro Kawasaki if (devi < 0) {
18193cb88bc1SShin'ichiro Kawasaki mutex_unlock(&dcc->cmd_lock);
18203cb88bc1SShin'ichiro Kawasaki return;
18213cb88bc1SShin'ichiro Kawasaki }
18223cb88bc1SShin'ichiro Kawasaki
18233cb88bc1SShin'ichiro Kawasaki if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
182425f90805SDaejun Park /* force submit zone reset */
182525f90805SDaejun Park if (dc->state == D_PREP)
182625f90805SDaejun Park __submit_zone_reset_cmd(sbi, dc, REQ_SYNC,
182725f90805SDaejun Park &dcc->wait_list, NULL);
182825f90805SDaejun Park dc->ref++;
182925f90805SDaejun Park mutex_unlock(&dcc->cmd_lock);
183025f90805SDaejun Park /* wait for zone reset */
183125f90805SDaejun Park __wait_one_discard_bio(sbi, dc);
183225f90805SDaejun Park return;
183325f90805SDaejun Park }
18343cb88bc1SShin'ichiro Kawasaki }
183525f90805SDaejun Park #endif
1836004b6862SChao Yu if (dc) {
1837ec9895adSChao Yu if (dc->state == D_PREP) {
18383d6a650fSYunlei He __punch_discard_cmd(sbi, dc, blkaddr);
1839ec9895adSChao Yu } else {
1840ec9895adSChao Yu dc->ref++;
1841ec9895adSChao Yu need_wait = true;
1842275b66b0SChao Yu }
1843ec9895adSChao Yu }
1844d431413fSChao Yu mutex_unlock(&dcc->cmd_lock);
1845ec9895adSChao Yu
18462a510c00SChao Yu if (need_wait)
18472a510c00SChao Yu __wait_one_discard_bio(sbi, dc);
1848d431413fSChao Yu }
1849d431413fSChao Yu
f2fs_stop_discard_thread(struct f2fs_sb_info * sbi)18504d57b86dSChao Yu void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
1851cce13252SChao Yu {
1852cce13252SChao Yu struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1853cce13252SChao Yu
1854cce13252SChao Yu if (dcc && dcc->f2fs_issue_discard) {
1855cce13252SChao Yu struct task_struct *discard_thread = dcc->f2fs_issue_discard;
1856cce13252SChao Yu
1857cce13252SChao Yu dcc->f2fs_issue_discard = NULL;
1858cce13252SChao Yu kthread_stop(discard_thread);
185915469963SJaegeuk Kim }
186015469963SJaegeuk Kim }
186115469963SJaegeuk Kim
1862b1c5ef26SYangtao Li /**
1863b1c5ef26SYangtao Li * f2fs_issue_discard_timeout() - Issue all discard commands within UMOUNT_DISCARD_TIMEOUT
1864b1c5ef26SYangtao Li * @sbi: the f2fs_sb_info data for the discard commands to issue
1865b1c5ef26SYangtao Li *
1866b1c5ef26SYangtao Li * When UMOUNT_DISCARD_TIMEOUT is exceeded, all remaining discard commands will be dropped.
1867b1c5ef26SYangtao Li *
1868b1c5ef26SYangtao Li * Return: true if all discard commands were issued or there were none to issue, otherwise false.
1869b1c5ef26SYangtao Li */
f2fs_issue_discard_timeout(struct f2fs_sb_info * sbi)187003f2c02dSJaegeuk Kim bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
1871275b66b0SChao Yu {
1872969d1b18SChao Yu struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
187378997b56SChao Yu struct discard_policy dpolicy;
1874cf5c759fSChao Yu bool dropped;
1875969d1b18SChao Yu
187625547439SYangtao Li if (!atomic_read(&dcc->discard_cmd_cnt))
1877b1c5ef26SYangtao Li return true;
187825547439SYangtao Li
18798bb4f253SJaegeuk Kim __init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
18808bb4f253SJaegeuk Kim dcc->discard_granularity);
188178997b56SChao Yu __issue_discard_cmd(sbi, &dpolicy);
1882cf5c759fSChao Yu dropped = __drop_discard_cmd(sbi);
1883cf5c759fSChao Yu
18849a997188SJaegeuk Kim /* just to make sure there are no pending discard commands */
18859a997188SJaegeuk Kim __wait_all_discard_cmd(sbi, NULL);
18862482c432SChao Yu
18872482c432SChao Yu f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
1888b1c5ef26SYangtao Li return !dropped;
1889969d1b18SChao Yu }
1890969d1b18SChao Yu
issue_discard_thread(void * data)189115469963SJaegeuk Kim static int issue_discard_thread(void *data)
189215469963SJaegeuk Kim {
189315469963SJaegeuk Kim struct f2fs_sb_info *sbi = data;
189415469963SJaegeuk Kim struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
189515469963SJaegeuk Kim wait_queue_head_t *q = &dcc->discard_wait_queue;
189678997b56SChao Yu struct discard_policy dpolicy;
1897d2d8e896SKonstantin Vyshetsky unsigned int wait_ms = dcc->min_discard_issue_time;
1898969d1b18SChao Yu int issued;
18991d7be270SJaegeuk Kim
19001d7be270SJaegeuk Kim set_freezable();
19011d7be270SJaegeuk Kim
19021d7be270SJaegeuk Kim do {
190348c08c51SYangtao Li wait_event_interruptible_timeout(*q,
190448c08c51SYangtao Li kthread_should_stop() || freezing(current) ||
190548c08c51SYangtao Li dcc->discard_wake,
190648c08c51SYangtao Li msecs_to_jiffies(wait_ms));
190748c08c51SYangtao Li
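/*
 * Use the aggressive DPOLICY_FORCE policy at the minimum granularity
 * when urgent GC is running or the discard cache is short on memory;
 * otherwise fall back to the normal background policy.
 */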
1908c35b8d5eSSahitya Tummala if (sbi->gc_mode == GC_URGENT_HIGH ||
1909c35b8d5eSSahitya Tummala !f2fs_available_free_memory(sbi, DISCARD_CACHE))
1910f08142bcSYangtao Li __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE,
1911f08142bcSYangtao Li MIN_DISCARD_GRANULARITY);
1912c35b8d5eSSahitya Tummala else
19138bb4f253SJaegeuk Kim __init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
191478997b56SChao Yu dcc->discard_granularity);
191578997b56SChao Yu
191635a9a766SSheng Yong if (dcc->discard_wake)
191745c98f5aSYangtao Li dcc->discard_wake = false;
191835a9a766SSheng Yong
191976c7bfb3SJaegeuk Kim /* clean up pending candidates before going to sleep */
192076c7bfb3SJaegeuk Kim if (atomic_read(&dcc->queued_discard))
192176c7bfb3SJaegeuk Kim __wait_all_discard_cmd(sbi, NULL);
192276c7bfb3SJaegeuk Kim
19231d7be270SJaegeuk Kim if (try_to_freeze())
19241d7be270SJaegeuk Kim continue;
19253b60d802SChao Yu if (f2fs_readonly(sbi->sb))
19263b60d802SChao Yu continue;
192715469963SJaegeuk Kim if (kthread_should_stop())
192815469963SJaegeuk Kim return 0;
192948c08c51SYangtao Li if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
193048c08c51SYangtao Li !atomic_read(&dcc->discard_cmd_cnt)) {
1931d6184774SYunlei He wait_ms = dpolicy.max_interval;
1932d6184774SYunlei He continue;
1933d6184774SYunlei He }
193415469963SJaegeuk Kim
1935dc6febb6SChao Yu sb_start_intwrite(sbi->sb);
1936dc6febb6SChao Yu
193778997b56SChao Yu issued = __issue_discard_cmd(sbi, &dpolicy);
1938f9d1dcedSYunlei He if (issued > 0) {
193978997b56SChao Yu __wait_all_discard_cmd(sbi, &dpolicy);
194078997b56SChao Yu wait_ms = dpolicy.min_interval;
1941f9d1dcedSYunlei He } else if (issued == -1) {
1942a7d10cf3SSahitya Tummala wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
1943a7d10cf3SSahitya Tummala if (!wait_ms)
1944f9d1dcedSYunlei He wait_ms = dpolicy.mid_interval;
1945969d1b18SChao Yu } else {
194678997b56SChao Yu wait_ms = dpolicy.max_interval;
1947969d1b18SChao Yu }
194848c08c51SYangtao Li if (!atomic_read(&dcc->discard_cmd_cnt))
194948c08c51SYangtao Li wait_ms = dpolicy.max_interval;
195015469963SJaegeuk Kim
1951dc6febb6SChao Yu sb_end_intwrite(sbi->sb);
1952dc6febb6SChao Yu
19531d7be270SJaegeuk Kim } while (!kthread_should_stop());
19541d7be270SJaegeuk Kim return 0;
195515469963SJaegeuk Kim }
195615469963SJaegeuk Kim
1957f46e8809SDamien Le Moal #ifdef CONFIG_BLK_DEV_ZONED
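/*
 * Zoned-device discard: sequential zones get a zone reset (issued
 * synchronously during recovery, queued otherwise), while conventional
 * zones fall back to a regular queued discard.
 */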
__f2fs_issue_discard_zone(struct f2fs_sb_info * sbi,struct block_device * bdev,block_t blkstart,block_t blklen)19583c62be17SJaegeuk Kim static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
19593c62be17SJaegeuk Kim struct block_device *bdev, block_t blkstart, block_t blklen)
1960f46e8809SDamien Le Moal {
196192592285SJaegeuk Kim sector_t sector, nr_sects;
196210a875f8SKinglong Mee block_t lblkstart = blkstart;
19633c62be17SJaegeuk Kim int devi = 0;
1964f26aaee6SYohan Joung u64 remainder = 0;
1965f46e8809SDamien Le Moal
19660916878dSDamien Le Moal if (f2fs_is_multi_device(sbi)) {
19673c62be17SJaegeuk Kim devi = f2fs_target_device_index(sbi, blkstart);
196895175dafSDamien Le Moal if (blkstart < FDEV(devi).start_blk ||
196995175dafSDamien Le Moal blkstart > FDEV(devi).end_blk) {
1970dcbb4c10SJoe Perches f2fs_err(sbi, "Invalid block %x", blkstart);
197195175dafSDamien Le Moal return -EIO;
197295175dafSDamien Le Moal }
19733c62be17SJaegeuk Kim blkstart -= FDEV(devi).start_blk;
19743c62be17SJaegeuk Kim }
1975f46e8809SDamien Le Moal
197695175dafSDamien Le Moal /* For sequential zones, reset the zone write pointer */
197795175dafSDamien Le Moal if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
197892592285SJaegeuk Kim sector = SECTOR_FROM_BLOCK(blkstart);
197992592285SJaegeuk Kim nr_sects = SECTOR_FROM_BLOCK(blklen);
1980f26aaee6SYohan Joung div64_u64_rem(sector, bdev_zone_sectors(bdev), &remainder);
198192592285SJaegeuk Kim
1982f26aaee6SYohan Joung if (remainder || nr_sects != bdev_zone_sectors(bdev)) {
1983dcbb4c10SJoe Perches f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
198492592285SJaegeuk Kim devi, sbi->s_ndevs ? FDEV(devi).path : "",
198592592285SJaegeuk Kim blkstart, blklen);
198692592285SJaegeuk Kim return -EIO;
198792592285SJaegeuk Kim }
198825f90805SDaejun Park
198925f90805SDaejun Park if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) {
1990d50aaeecSJaegeuk Kim trace_f2fs_issue_reset_zone(bdev, blkstart);
19916c1b1da5SAjay Joshi return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
19926c1b1da5SAjay Joshi sector, nr_sects, GFP_NOFS);
1993f46e8809SDamien Le Moal }
199495175dafSDamien Le Moal
199525f90805SDaejun Park __queue_zone_reset_cmd(sbi, bdev, blkstart, lblkstart, blklen);
199625f90805SDaejun Park return 0;
199725f90805SDaejun Park }
199825f90805SDaejun Park
199995175dafSDamien Le Moal /* For conventional zones, use regular discard if supported */
200062081639SYangtao Li __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
200162081639SYangtao Li return 0;
2002f46e8809SDamien Le Moal }
2003f46e8809SDamien Le Moal #endif
2004f46e8809SDamien Le Moal
__issue_discard_async(struct f2fs_sb_info * sbi,struct block_device * bdev,block_t blkstart,block_t blklen)20053c62be17SJaegeuk Kim static int __issue_discard_async(struct f2fs_sb_info *sbi,
20063c62be17SJaegeuk Kim struct block_device *bdev, block_t blkstart, block_t blklen)
20073c62be17SJaegeuk Kim {
20083c62be17SJaegeuk Kim #ifdef CONFIG_BLK_DEV_ZONED
20097f3d7719SDamien Le Moal if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
20103c62be17SJaegeuk Kim return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
20113c62be17SJaegeuk Kim #endif
201262081639SYangtao Li __queue_discard_cmd(sbi, bdev, blkstart, blklen);
201362081639SYangtao Li return 0;
20143c62be17SJaegeuk Kim }
20153c62be17SJaegeuk Kim
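/*
 * Split [blkstart, blkstart + blklen) at device boundaries, queue a
 * discard (or zone reset) for each piece, and mark the blocks in each
 * segment's discard_map so they are no longer counted as discardable.
 */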
f2fs_issue_discard(struct f2fs_sb_info * sbi,block_t blkstart,block_t blklen)20161e87a78dSJaegeuk Kim static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
201737208879SJaegeuk Kim block_t blkstart, block_t blklen)
201837208879SJaegeuk Kim {
20193c62be17SJaegeuk Kim sector_t start = blkstart, len = 0;
20203c62be17SJaegeuk Kim struct block_device *bdev;
2021a66cdd98SJaegeuk Kim struct seg_entry *se;
2022a66cdd98SJaegeuk Kim unsigned int offset;
2023a66cdd98SJaegeuk Kim block_t i;
20243c62be17SJaegeuk Kim int err = 0;
2025a66cdd98SJaegeuk Kim
20263c62be17SJaegeuk Kim bdev = f2fs_target_device(sbi, blkstart, NULL);
20273c62be17SJaegeuk Kim
20283c62be17SJaegeuk Kim for (i = blkstart; i < blkstart + blklen; i++, len++) {
20293c62be17SJaegeuk Kim if (i != start) {
20303c62be17SJaegeuk Kim struct block_device *bdev2 =
20313c62be17SJaegeuk Kim f2fs_target_device(sbi, i, NULL);
20323c62be17SJaegeuk Kim
20333c62be17SJaegeuk Kim if (bdev2 != bdev) {
20343c62be17SJaegeuk Kim err = __issue_discard_async(sbi, bdev,
20353c62be17SJaegeuk Kim start, len);
20363c62be17SJaegeuk Kim if (err)
20373c62be17SJaegeuk Kim return err;
20383c62be17SJaegeuk Kim bdev = bdev2;
20393c62be17SJaegeuk Kim start = i;
20403c62be17SJaegeuk Kim len = 0;
20413c62be17SJaegeuk Kim }
20423c62be17SJaegeuk Kim }
20433c62be17SJaegeuk Kim
2044a66cdd98SJaegeuk Kim se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
2045a66cdd98SJaegeuk Kim offset = GET_BLKOFF_FROM_SEG0(sbi, i);
2046a66cdd98SJaegeuk Kim
20474f993264SChao Yu if (f2fs_block_unit_discard(sbi) &&
20484f993264SChao Yu !f2fs_test_and_set_bit(offset, se->discard_map))
2049a66cdd98SJaegeuk Kim sbi->discard_blks--;
2050a66cdd98SJaegeuk Kim }
2051f46e8809SDamien Le Moal
20523c62be17SJaegeuk Kim if (len)
20533c62be17SJaegeuk Kim err = __issue_discard_async(sbi, bdev, start, len);
20543c62be17SJaegeuk Kim return err;
20551e87a78dSJaegeuk Kim }
20561e87a78dSJaegeuk Kim
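/*
 * Collect small discard candidates for the segment at cpc->trim_start:
 * blocks that were valid in the last checkpoint but are free now (or, in
 * force/FITRIM mode, free blocks not yet discarded) are recorded as
 * discard_entry bitmaps on the dcc entry_list; with check_only, just
 * report whether any such blocks exist.
 */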
add_discard_addrs(struct f2fs_sb_info * sbi,struct cp_control * cpc,bool check_only)205725290fa5SJaegeuk Kim static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
205825290fa5SJaegeuk Kim bool check_only)
2059adf4983bSJaegeuk Kim {
2060b2955550SJaegeuk Kim int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
20614b2fecc8SJaegeuk Kim struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
2062b2955550SJaegeuk Kim unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2063b2955550SJaegeuk Kim unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2064a66cdd98SJaegeuk Kim unsigned long *discard_map = (unsigned long *)se->discard_map;
206560a3b782SJaegeuk Kim unsigned long *dmap = SIT_I(sbi)->tmp_map;
2066b2955550SJaegeuk Kim unsigned int start = 0, end = -1;
2067c473f1a9SChao Yu bool force = (cpc->reason & CP_DISCARD);
2068a7eeb823SChao Yu struct discard_entry *de = NULL;
206946f84c2cSChao Yu struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
2070b2955550SJaegeuk Kim int i;
2071b2955550SJaegeuk Kim
2072f0248ba6SJaegeuk Kim if (se->valid_blocks == BLKS_PER_SEG(sbi) ||
2073f0248ba6SJaegeuk Kim !f2fs_hw_support_discard(sbi) ||
20744f993264SChao Yu !f2fs_block_unit_discard(sbi))
207525290fa5SJaegeuk Kim return false;
2076b2955550SJaegeuk Kim
2077a66cdd98SJaegeuk Kim if (!force) {
20787d20c8abSChao Yu if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
20790b54fb84SJaegeuk Kim SM_I(sbi)->dcc_info->nr_discards >=
20800b54fb84SJaegeuk Kim SM_I(sbi)->dcc_info->max_discards)
208125290fa5SJaegeuk Kim return false;
20824b2fecc8SJaegeuk Kim }
2083b2955550SJaegeuk Kim
2084b2955550SJaegeuk Kim /* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
2085b2955550SJaegeuk Kim for (i = 0; i < entries; i++)
2086a66cdd98SJaegeuk Kim dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
2087d7bc2484SJaegeuk Kim (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
2088b2955550SJaegeuk Kim
20890b54fb84SJaegeuk Kim while (force || SM_I(sbi)->dcc_info->nr_discards <=
20900b54fb84SJaegeuk Kim SM_I(sbi)->dcc_info->max_discards) {
2091f0248ba6SJaegeuk Kim start = __find_rev_next_bit(dmap, BLKS_PER_SEG(sbi), end + 1);
2092f0248ba6SJaegeuk Kim if (start >= BLKS_PER_SEG(sbi))
2093b2955550SJaegeuk Kim break;
2094b2955550SJaegeuk Kim
2095f0248ba6SJaegeuk Kim end = __find_rev_next_zero_bit(dmap,
2096f0248ba6SJaegeuk Kim BLKS_PER_SEG(sbi), start + 1);
2097f0248ba6SJaegeuk Kim if (force && start && end != BLKS_PER_SEG(sbi) &&
2098f0248ba6SJaegeuk Kim (end - start) < cpc->trim_minlen)
2099c7b41e16SYunlei He continue;
2100c7b41e16SYunlei He
210125290fa5SJaegeuk Kim if (check_only)
210225290fa5SJaegeuk Kim return true;
210325290fa5SJaegeuk Kim
2104a7eeb823SChao Yu if (!de) {
2105a7eeb823SChao Yu de = f2fs_kmem_cache_alloc(discard_entry_slab,
210632410577SChao Yu GFP_F2FS_ZERO, true, NULL);
2107a7eeb823SChao Yu de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
2108a7eeb823SChao Yu list_add_tail(&de->list, head);
2109a7eeb823SChao Yu }
2110a7eeb823SChao Yu
2111a7eeb823SChao Yu for (i = start; i < end; i++)
2112a7eeb823SChao Yu __set_bit_le(i, (void *)de->discard_map);
2113a7eeb823SChao Yu
2114a7eeb823SChao Yu SM_I(sbi)->dcc_info->nr_discards += end - start;
2115b2955550SJaegeuk Kim }
211625290fa5SJaegeuk Kim return false;
2117b2955550SJaegeuk Kim }
2118b2955550SJaegeuk Kim
release_discard_addr(struct discard_entry * entry)2119af8ff65bSChao Yu static void release_discard_addr(struct discard_entry *entry)
2120af8ff65bSChao Yu {
2121af8ff65bSChao Yu list_del(&entry->list);
2122af8ff65bSChao Yu kmem_cache_free(discard_entry_slab, entry);
2123af8ff65bSChao Yu }
2124af8ff65bSChao Yu
f2fs_release_discard_addrs(struct f2fs_sb_info * sbi)21254d57b86dSChao Yu void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
21264b2fecc8SJaegeuk Kim {
212746f84c2cSChao Yu struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
21284b2fecc8SJaegeuk Kim struct discard_entry *entry, *this;
21294b2fecc8SJaegeuk Kim
21304b2fecc8SJaegeuk Kim /* drop caches */
2131af8ff65bSChao Yu list_for_each_entry_safe(entry, this, head, list)
2132af8ff65bSChao Yu release_discard_addr(entry);
21334b2fecc8SJaegeuk Kim }
21344b2fecc8SJaegeuk Kim
21350a8165d7SJaegeuk Kim /*
21364d57b86dSChao Yu * Should call f2fs_clear_prefree_segments after checkpoint is done.
2137351df4b2SJaegeuk Kim */
set_prefree_as_free_segments(struct f2fs_sb_info * sbi)2138351df4b2SJaegeuk Kim static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
2139351df4b2SJaegeuk Kim {
2140351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2141b65ee148SChao Yu unsigned int segno;
2142351df4b2SJaegeuk Kim
2143351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock);
21447cd8558bSJaegeuk Kim for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
2145d0b9e42aSChao Yu __set_test_and_free(sbi, segno, false);
2146351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock);
2147351df4b2SJaegeuk Kim }
2148351df4b2SJaegeuk Kim
f2fs_clear_prefree_segments(struct f2fs_sb_info * sbi,struct cp_control * cpc)21494d57b86dSChao Yu void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
21504d57b86dSChao Yu struct cp_control *cpc)
2151351df4b2SJaegeuk Kim {
2152969d1b18SChao Yu struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2153969d1b18SChao Yu struct list_head *head = &dcc->entry_list;
21542d7b822aSChao Yu struct discard_entry *entry, *this;
2155351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
215629e59c14SChangman Lee unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
215729e59c14SChangman Lee unsigned int start = 0, end = -1;
215836abef4eSJaegeuk Kim unsigned int secno, start_segno;
2159c473f1a9SChao Yu bool force = (cpc->reason & CP_DISCARD);
21604f993264SChao Yu bool section_alignment = F2FS_OPTION(sbi).discard_unit ==
21614f993264SChao Yu DISCARD_UNIT_SECTION;
21624f993264SChao Yu
21634f993264SChao Yu if (f2fs_lfs_mode(sbi) && __is_large_section(sbi))
21644f993264SChao Yu section_alignment = true;
2165351df4b2SJaegeuk Kim
2166351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock);
216729e59c14SChangman Lee
2168351df4b2SJaegeuk Kim while (1) {
216929e59c14SChangman Lee int i;
2170ad6672bbSYunlong Song
21714f993264SChao Yu if (section_alignment && end != -1)
2172ad6672bbSYunlong Song end--;
21737cd8558bSJaegeuk Kim start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
21747cd8558bSJaegeuk Kim if (start >= MAIN_SEGS(sbi))
2175351df4b2SJaegeuk Kim break;
21767cd8558bSJaegeuk Kim end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
21777cd8558bSJaegeuk Kim start + 1);
2178351df4b2SJaegeuk Kim
21794f993264SChao Yu if (section_alignment) {
2180f0248ba6SJaegeuk Kim start = rounddown(start, SEGS_PER_SEC(sbi));
2181f0248ba6SJaegeuk Kim end = roundup(end, SEGS_PER_SEC(sbi));
2182ad6672bbSYunlong Song }
2183351df4b2SJaegeuk Kim
2184ad6672bbSYunlong Song for (i = start; i < end; i++) {
2185ad6672bbSYunlong Song if (test_and_clear_bit(i, prefree_map))
2186ad6672bbSYunlong Song dirty_i->nr_dirty[PRE]--;
2187ad6672bbSYunlong Song }
218829e59c14SChangman Lee
21897d20c8abSChao Yu if (!f2fs_realtime_discard_enable(sbi))
2190650d3c4eSYunlei He continue;
2191650d3c4eSYunlei He
2192650d3c4eSYunlei He if (force && start >= cpc->trim_start &&
2193650d3c4eSYunlei He (end - 1) <= cpc->trim_end)
219429e59c14SChangman Lee continue;
219529e59c14SChangman Lee
21966797ebc4SYonggil Song /* Should cover 2MB zoned device for zone-based reset */
21976797ebc4SYonggil Song if (!f2fs_sb_has_blkzoned(sbi) &&
21986797ebc4SYonggil Song (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi))) {
219937208879SJaegeuk Kim f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
220037208879SJaegeuk Kim (end - start) << sbi->log_blocks_per_seg);
220136abef4eSJaegeuk Kim continue;
220236abef4eSJaegeuk Kim }
220336abef4eSJaegeuk Kim next:
22044ddb1a4dSJaegeuk Kim secno = GET_SEC_FROM_SEG(sbi, start);
22054ddb1a4dSJaegeuk Kim start_segno = GET_SEG_FROM_SEC(sbi, secno);
220636abef4eSJaegeuk Kim if (!IS_CURSEC(sbi, secno) &&
2207302bd348SJaegeuk Kim !get_valid_blocks(sbi, start, true))
220836abef4eSJaegeuk Kim f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
2209f0248ba6SJaegeuk Kim BLKS_PER_SEC(sbi));
221036abef4eSJaegeuk Kim
2211f0248ba6SJaegeuk Kim start = start_segno + SEGS_PER_SEC(sbi);
221236abef4eSJaegeuk Kim if (start < end)
221336abef4eSJaegeuk Kim goto next;
22148b107f5bSJaegeuk Kim else
22158b107f5bSJaegeuk Kim end = start - 1;
2216351df4b2SJaegeuk Kim }
2217351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock);
2218b2955550SJaegeuk Kim
22194f993264SChao Yu if (!f2fs_block_unit_discard(sbi))
22204f993264SChao Yu goto wakeup;
22214f993264SChao Yu
2222b2955550SJaegeuk Kim /* send small discards */
22232d7b822aSChao Yu list_for_each_entry_safe(entry, this, head, list) {
2224a7eeb823SChao Yu unsigned int cur_pos = 0, next_pos, len, total_len = 0;
2225a7eeb823SChao Yu bool is_valid = test_bit_le(0, entry->discard_map);
2226a7eeb823SChao Yu
2227a7eeb823SChao Yu find_next:
2228a7eeb823SChao Yu if (is_valid) {
2229a7eeb823SChao Yu next_pos = find_next_zero_bit_le(entry->discard_map,
2230f0248ba6SJaegeuk Kim BLKS_PER_SEG(sbi), cur_pos);
2231a7eeb823SChao Yu len = next_pos - cur_pos;
2232a7eeb823SChao Yu
22337beb01f7SChao Yu if (f2fs_sb_has_blkzoned(sbi) ||
2234005abf9eSChao Yu (force && len < cpc->trim_minlen))
2235836b5a63SJaegeuk Kim goto skip;
2236a7eeb823SChao Yu
2237a7eeb823SChao Yu f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
2238a7eeb823SChao Yu len);
2239a7eeb823SChao Yu total_len += len;
2240a7eeb823SChao Yu } else {
2241a7eeb823SChao Yu next_pos = find_next_bit_le(entry->discard_map,
2242f0248ba6SJaegeuk Kim BLKS_PER_SEG(sbi), cur_pos);
2243a7eeb823SChao Yu }
2244836b5a63SJaegeuk Kim skip:
2245a7eeb823SChao Yu cur_pos = next_pos;
2246a7eeb823SChao Yu is_valid = !is_valid;
2247a7eeb823SChao Yu
2248f0248ba6SJaegeuk Kim if (cur_pos < BLKS_PER_SEG(sbi))
2249a7eeb823SChao Yu goto find_next;
2250a7eeb823SChao Yu
2251af8ff65bSChao Yu release_discard_addr(entry);
2252969d1b18SChao Yu dcc->nr_discards -= total_len;
2253b2955550SJaegeuk Kim }
225434e159daSChao Yu
22554f993264SChao Yu wakeup:
225601983c71SJaegeuk Kim wake_up_discard_thread(sbi, false);
2257351df4b2SJaegeuk Kim }
2258351df4b2SJaegeuk Kim
f2fs_start_discard_thread(struct f2fs_sb_info * sbi)22594d674904SFengnan Chang int f2fs_start_discard_thread(struct f2fs_sb_info *sbi)
22600b54fb84SJaegeuk Kim {
226115469963SJaegeuk Kim dev_t dev = sbi->sb->s_bdev->bd_dev;
22624d674904SFengnan Chang struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
22634d674904SFengnan Chang int err = 0;
22644d674904SFengnan Chang
22654d674904SFengnan Chang if (!f2fs_realtime_discard_enable(sbi))
22664d674904SFengnan Chang return 0;
22674d674904SFengnan Chang
22684d674904SFengnan Chang dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
22694d674904SFengnan Chang "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
227091586ce0SChao Yu if (IS_ERR(dcc->f2fs_issue_discard)) {
22714d674904SFengnan Chang err = PTR_ERR(dcc->f2fs_issue_discard);
227291586ce0SChao Yu dcc->f2fs_issue_discard = NULL;
227391586ce0SChao Yu }
22744d674904SFengnan Chang
22754d674904SFengnan Chang return err;
22764d674904SFengnan Chang }
22774d674904SFengnan Chang
create_discard_cmd_control(struct f2fs_sb_info * sbi)22784d674904SFengnan Chang static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
22794d674904SFengnan Chang {
22800b54fb84SJaegeuk Kim struct discard_cmd_control *dcc;
2281ba48a33eSChao Yu int err = 0, i;
22820b54fb84SJaegeuk Kim
22830b54fb84SJaegeuk Kim if (SM_I(sbi)->dcc_info) {
22840b54fb84SJaegeuk Kim dcc = SM_I(sbi)->dcc_info;
22850b54fb84SJaegeuk Kim goto init_thread;
22860b54fb84SJaegeuk Kim }
22870b54fb84SJaegeuk Kim
2288acbf054dSChao Yu dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
22890b54fb84SJaegeuk Kim if (!dcc)
22900b54fb84SJaegeuk Kim return -ENOMEM;
22910b54fb84SJaegeuk Kim
2292120e0ea1SYangtao Li dcc->discard_io_aware_gran = MAX_PLIST_NUM;
2293969d1b18SChao Yu dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
2294c46867e9SYangtao Li dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY;
22954f993264SChao Yu if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
2296f0248ba6SJaegeuk Kim dcc->discard_granularity = BLKS_PER_SEG(sbi);
22974f993264SChao Yu else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
22984f993264SChao Yu dcc->discard_granularity = BLKS_PER_SEC(sbi);
22994f993264SChao Yu
230046f84c2cSChao Yu INIT_LIST_HEAD(&dcc->entry_list);
230178997b56SChao Yu for (i = 0; i < MAX_PLIST_NUM; i++)
2302ba48a33eSChao Yu INIT_LIST_HEAD(&dcc->pend_list[i]);
230346f84c2cSChao Yu INIT_LIST_HEAD(&dcc->wait_list);
23048412663dSChao Yu INIT_LIST_HEAD(&dcc->fstrim_list);
230515469963SJaegeuk Kim mutex_init(&dcc->cmd_lock);
23068b8dd65fSChao Yu atomic_set(&dcc->issued_discard, 0);
230772691af6SJaegeuk Kim atomic_set(&dcc->queued_discard, 0);
23085f32366aSChao Yu atomic_set(&dcc->discard_cmd_cnt, 0);
23090b54fb84SJaegeuk Kim dcc->nr_discards = 0;
2310d618ebafSChao Yu dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
2311d2d8e896SKonstantin Vyshetsky dcc->max_discard_request = DEF_MAX_DISCARD_REQUEST;
2312d2d8e896SKonstantin Vyshetsky dcc->min_discard_issue_time = DEF_MIN_DISCARD_ISSUE_TIME;
2313d2d8e896SKonstantin Vyshetsky dcc->mid_discard_issue_time = DEF_MID_DISCARD_ISSUE_TIME;
2314d2d8e896SKonstantin Vyshetsky dcc->max_discard_issue_time = DEF_MAX_DISCARD_ISSUE_TIME;
23158a47d228SYangtao Li dcc->discard_urgent_util = DEF_DISCARD_URGENT_UTIL;
2316d84d1cbdSChao Yu dcc->undiscard_blks = 0;
231720ee4382SChao Yu dcc->next_pos = 0;
23184dada3fdSChao Yu dcc->root = RB_ROOT_CACHED;
231967fce70bSChao Yu dcc->rbtree_check = false;
23200b54fb84SJaegeuk Kim
232115469963SJaegeuk Kim init_waitqueue_head(&dcc->discard_wait_queue);
23220b54fb84SJaegeuk Kim SM_I(sbi)->dcc_info = dcc;
23230b54fb84SJaegeuk Kim init_thread:
23244d674904SFengnan Chang err = f2fs_start_discard_thread(sbi);
23254d674904SFengnan Chang if (err) {
2326c8eb7024SChao Yu kfree(dcc);
232715469963SJaegeuk Kim SM_I(sbi)->dcc_info = NULL;
232815469963SJaegeuk Kim }
232915469963SJaegeuk Kim
23300b54fb84SJaegeuk Kim return err;
23310b54fb84SJaegeuk Kim }
23320b54fb84SJaegeuk Kim
2333f099405fSChao Yu static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
23340b54fb84SJaegeuk Kim {
23350b54fb84SJaegeuk Kim struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
23360b54fb84SJaegeuk Kim
2337f099405fSChao Yu if (!dcc)
2338f099405fSChao Yu return;
2339f099405fSChao Yu
23404d57b86dSChao Yu f2fs_stop_discard_thread(sbi);
2341f099405fSChao Yu
234204f9287aSChao Yu /*
234304f9287aSChao Yu * Recovery can cache discard commands, so in error path of
234404f9287aSChao Yu * fill_super(), it needs to give a chance to handle them.
234504f9287aSChao Yu */
234604f9287aSChao Yu f2fs_issue_discard_timeout(sbi);
234704f9287aSChao Yu
2348c8eb7024SChao Yu kfree(dcc);
23490b54fb84SJaegeuk Kim SM_I(sbi)->dcc_info = NULL;
23500b54fb84SJaegeuk Kim }
23510b54fb84SJaegeuk Kim
2352184a5cd2SChao Yu static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
2353351df4b2SJaegeuk Kim {
2354351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi);
2355184a5cd2SChao Yu
2356184a5cd2SChao Yu if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
2357351df4b2SJaegeuk Kim sit_i->dirty_sentries++;
2358184a5cd2SChao Yu return false;
2359184a5cd2SChao Yu }
2360184a5cd2SChao Yu
2361184a5cd2SChao Yu return true;
2362351df4b2SJaegeuk Kim }
2363351df4b2SJaegeuk Kim
2364351df4b2SJaegeuk Kim static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
2365351df4b2SJaegeuk Kim unsigned int segno, int modified)
2366351df4b2SJaegeuk Kim {
2367351df4b2SJaegeuk Kim struct seg_entry *se = get_seg_entry(sbi, segno);
23685f029c04SYi Zhuang
2369351df4b2SJaegeuk Kim se->type = type;
2370351df4b2SJaegeuk Kim if (modified)
2371351df4b2SJaegeuk Kim __mark_sit_entry_dirty(sbi, segno);
2372351df4b2SJaegeuk Kim }
2373351df4b2SJaegeuk Kim
2374c5d02785SChao Yu static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi,
2375c5d02785SChao Yu block_t blkaddr)
23766f3a01aeSChao Yu {
23776f3a01aeSChao Yu unsigned int segno = GET_SEGNO(sbi, blkaddr);
2378c5d02785SChao Yu
2379c5d02785SChao Yu if (segno == NULL_SEGNO)
2380c5d02785SChao Yu return 0;
2381c5d02785SChao Yu return get_seg_entry(sbi, segno)->mtime;
2382c5d02785SChao Yu }
2383c5d02785SChao Yu
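/*
 * Fold the current time (or a caller-provided old mtime) into the
 * valid-block-weighted average mtime of the segment owning @blkaddr,
 * and bump the global max_mtime when needed.
 */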
2384c5d02785SChao Yu static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
2385c5d02785SChao Yu unsigned long long old_mtime)
2386c5d02785SChao Yu {
2387c5d02785SChao Yu struct seg_entry *se;
2388c5d02785SChao Yu unsigned int segno = GET_SEGNO(sbi, blkaddr);
2389c5d02785SChao Yu unsigned long long ctime = get_mtime(sbi, false);
2390c5d02785SChao Yu unsigned long long mtime = old_mtime ? old_mtime : ctime;
2391c5d02785SChao Yu
2392c5d02785SChao Yu if (segno == NULL_SEGNO)
2393c5d02785SChao Yu return;
2394c5d02785SChao Yu
2395c5d02785SChao Yu se = get_seg_entry(sbi, segno);
23966f3a01aeSChao Yu
23976f3a01aeSChao Yu if (!se->mtime)
23986f3a01aeSChao Yu se->mtime = mtime;
23996f3a01aeSChao Yu else
24006f3a01aeSChao Yu se->mtime = div_u64(se->mtime * se->valid_blocks + mtime,
24016f3a01aeSChao Yu se->valid_blocks + 1);
24026f3a01aeSChao Yu
2403c5d02785SChao Yu if (ctime > SIT_I(sbi)->max_mtime)
2404c5d02785SChao Yu SIT_I(sbi)->max_mtime = ctime;
24056f3a01aeSChao Yu }
24066f3a01aeSChao Yu
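/*
 * Apply a valid-block-count delta for the segment owning @blkaddr:
 * update cur_valid_map (plus its debug mirror), the discard and
 * checkpoint bitmaps, section-level counters for large sections, and
 * mark the SIT entry dirty for the next flush.
 */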
2407351df4b2SJaegeuk Kim static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2408351df4b2SJaegeuk Kim {
2409351df4b2SJaegeuk Kim struct seg_entry *se;
2410351df4b2SJaegeuk Kim unsigned int segno, offset;
2411351df4b2SJaegeuk Kim long int new_vblocks;
24126415fedcSYunlong Song bool exist;
24136415fedcSYunlong Song #ifdef CONFIG_F2FS_CHECK_FS
24146415fedcSYunlong Song bool mir_exist;
24156415fedcSYunlong Song #endif
2416351df4b2SJaegeuk Kim
2417351df4b2SJaegeuk Kim segno = GET_SEGNO(sbi, blkaddr);
2418428fb40bSZhiguo Niu if (segno == NULL_SEGNO)
2419428fb40bSZhiguo Niu return;
2420351df4b2SJaegeuk Kim
2421351df4b2SJaegeuk Kim se = get_seg_entry(sbi, segno);
2422351df4b2SJaegeuk Kim new_vblocks = se->valid_blocks + del;
2423491c0854SJaegeuk Kim offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2424351df4b2SJaegeuk Kim
24259feffe14SZhihao Cheng f2fs_bug_on(sbi, (new_vblocks < 0 ||
2426de881df9SAravind Ramesh (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
2427351df4b2SJaegeuk Kim
2428351df4b2SJaegeuk Kim se->valid_blocks = new_vblocks;
2429351df4b2SJaegeuk Kim
2430351df4b2SJaegeuk Kim /* Update valid block bitmap */
2431351df4b2SJaegeuk Kim if (del > 0) {
24326415fedcSYunlong Song exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
2433355e7891SChao Yu #ifdef CONFIG_F2FS_CHECK_FS
24346415fedcSYunlong Song mir_exist = f2fs_test_and_set_bit(offset,
24356415fedcSYunlong Song se->cur_valid_map_mir);
24366415fedcSYunlong Song if (unlikely(exist != mir_exist)) {
2437dcbb4c10SJoe Perches f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
24386415fedcSYunlong Song blkaddr, exist);
243905796763SJaegeuk Kim f2fs_bug_on(sbi, 1);
2440355e7891SChao Yu }
24416415fedcSYunlong Song #endif
24426415fedcSYunlong Song if (unlikely(exist)) {
2443dcbb4c10SJoe Perches f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
2444dcbb4c10SJoe Perches blkaddr);
24456415fedcSYunlong Song f2fs_bug_on(sbi, 1);
244635ee82caSYunlong Song se->valid_blocks--;
244735ee82caSYunlong Song del = 0;
24486415fedcSYunlong Song }
24496415fedcSYunlong Song
24504f993264SChao Yu if (f2fs_block_unit_discard(sbi) &&
24514f993264SChao Yu !f2fs_test_and_set_bit(offset, se->discard_map))
2452a66cdd98SJaegeuk Kim sbi->discard_blks--;
2453720037f9SJaegeuk Kim
2454899fee36SChao Yu /*
2455899fee36SChao Yu * SSR should never reuse a block which is checkpointed
2456899fee36SChao Yu * or newly invalidated.
2457899fee36SChao Yu */
2458899fee36SChao Yu if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
2459720037f9SJaegeuk Kim if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
2460720037f9SJaegeuk Kim se->ckpt_valid_blocks++;
2461720037f9SJaegeuk Kim }
2462351df4b2SJaegeuk Kim } else {
24636415fedcSYunlong Song exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
2464355e7891SChao Yu #ifdef CONFIG_F2FS_CHECK_FS
24656415fedcSYunlong Song mir_exist = f2fs_test_and_clear_bit(offset,
24666415fedcSYunlong Song se->cur_valid_map_mir);
24676415fedcSYunlong Song if (unlikely(exist != mir_exist)) {
2468dcbb4c10SJoe Perches f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
24696415fedcSYunlong Song blkaddr, exist);
247005796763SJaegeuk Kim f2fs_bug_on(sbi, 1);
2471355e7891SChao Yu }
24726415fedcSYunlong Song #endif
24736415fedcSYunlong Song if (unlikely(!exist)) {
2474dcbb4c10SJoe Perches f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
2475dcbb4c10SJoe Perches blkaddr);
24766415fedcSYunlong Song f2fs_bug_on(sbi, 1);
247735ee82caSYunlong Song se->valid_blocks++;
247835ee82caSYunlong Song del = 0;
24794354994fSDaniel Rosenberg } else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
24804354994fSDaniel Rosenberg /*
24814354994fSDaniel Rosenberg * If checkpoints are off, we must not reuse data that
24824354994fSDaniel Rosenberg * was used in the previous checkpoint. If it was used
24834354994fSDaniel Rosenberg * before, we must track that to know how much space we
24844354994fSDaniel Rosenberg * really have.
24854354994fSDaniel Rosenberg */
2486c9c8ed50SChao Yu if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
2487c9c8ed50SChao Yu spin_lock(&sbi->stat_lock);
24884354994fSDaniel Rosenberg sbi->unusable_block_count++;
2489c9c8ed50SChao Yu spin_unlock(&sbi->stat_lock);
2490c9c8ed50SChao Yu }
24916415fedcSYunlong Song }
24926415fedcSYunlong Song
24934f993264SChao Yu if (f2fs_block_unit_discard(sbi) &&
24944f993264SChao Yu f2fs_test_and_clear_bit(offset, se->discard_map))
2495a66cdd98SJaegeuk Kim sbi->discard_blks++;
2496351df4b2SJaegeuk Kim }
2497351df4b2SJaegeuk Kim if (!f2fs_test_bit(offset, se->ckpt_valid_map))
2498351df4b2SJaegeuk Kim se->ckpt_valid_blocks += del;
2499351df4b2SJaegeuk Kim
2500351df4b2SJaegeuk Kim __mark_sit_entry_dirty(sbi, segno);
2501351df4b2SJaegeuk Kim
2502351df4b2SJaegeuk Kim /* update total number of valid blocks to be written in ckpt area */
2503351df4b2SJaegeuk Kim SIT_I(sbi)->written_valid_blocks += del;
2504351df4b2SJaegeuk Kim
25052c70c5e3SChao Yu if (__is_large_section(sbi))
2506351df4b2SJaegeuk Kim get_sec_entry(sbi, segno)->valid_blocks += del;
2507351df4b2SJaegeuk Kim }
2508351df4b2SJaegeuk Kim
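/*
 * Invalidate a single block: drop it from internal caches, decrease the
 * valid block count of its segment, refresh the segment mtime and put
 * the segment on the dirty list.
 */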
25094d57b86dSChao Yu void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
2510351df4b2SJaegeuk Kim {
2511351df4b2SJaegeuk Kim unsigned int segno = GET_SEGNO(sbi, addr);
2512351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi);
2513351df4b2SJaegeuk Kim
25149850cf4aSJaegeuk Kim f2fs_bug_on(sbi, addr == NULL_ADDR);
25154c8ff709SChao Yu if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
2516351df4b2SJaegeuk Kim return;
2517351df4b2SJaegeuk Kim
2518cfd217f6SChao Yu f2fs_invalidate_internal_cache(sbi, addr);
25196aa58d8aSChao Yu
2520351df4b2SJaegeuk Kim /* add it into sit main buffer */
25213d26fa6bSChao Yu down_write(&sit_i->sentry_lock);
2522351df4b2SJaegeuk Kim
2523c5d02785SChao Yu update_segment_mtime(sbi, addr, 0);
2524351df4b2SJaegeuk Kim update_sit_entry(sbi, addr, -1);
2525351df4b2SJaegeuk Kim
2526351df4b2SJaegeuk Kim /* add it into dirty seglist */
2527351df4b2SJaegeuk Kim locate_dirty_segment(sbi, segno);
2528351df4b2SJaegeuk Kim
25293d26fa6bSChao Yu up_write(&sit_i->sentry_lock);
2530351df4b2SJaegeuk Kim }
2531351df4b2SJaegeuk Kim
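/*
 * Return true if @blkaddr still belongs to the last checkpoint, i.e. its
 * bit remains set in the segment's ckpt_valid_map (addresses that are not
 * valid data blocks are treated as checkpointed).
 */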
25324d57b86dSChao Yu bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
25336e2c64adSJaegeuk Kim {
25346e2c64adSJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi);
25356e2c64adSJaegeuk Kim unsigned int segno, offset;
25366e2c64adSJaegeuk Kim struct seg_entry *se;
25376e2c64adSJaegeuk Kim bool is_cp = false;
25386e2c64adSJaegeuk Kim
253993770ab7SChao Yu if (!__is_valid_data_blkaddr(blkaddr))
25406e2c64adSJaegeuk Kim return true;
25416e2c64adSJaegeuk Kim
25423d26fa6bSChao Yu down_read(&sit_i->sentry_lock);
25436e2c64adSJaegeuk Kim
25446e2c64adSJaegeuk Kim segno = GET_SEGNO(sbi, blkaddr);
25456e2c64adSJaegeuk Kim se = get_seg_entry(sbi, segno);
25466e2c64adSJaegeuk Kim offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
25476e2c64adSJaegeuk Kim
25486e2c64adSJaegeuk Kim if (f2fs_test_bit(offset, se->ckpt_valid_map))
25496e2c64adSJaegeuk Kim is_cp = true;
25506e2c64adSJaegeuk Kim
25513d26fa6bSChao Yu up_read(&sit_i->sentry_lock);
25526e2c64adSJaegeuk Kim
25536e2c64adSJaegeuk Kim return is_cp;
25546e2c64adSJaegeuk Kim }
25556e2c64adSJaegeuk Kim
25566392e9ffSChristoph Hellwig static unsigned short f2fs_curseg_valid_blocks(struct f2fs_sb_info *sbi, int type)
25576392e9ffSChristoph Hellwig {
25586392e9ffSChristoph Hellwig struct curseg_info *curseg = CURSEG_I(sbi, type);
25596392e9ffSChristoph Hellwig
25606392e9ffSChristoph Hellwig if (sbi->ckpt->alloc_type[type] == SSR)
2561f0248ba6SJaegeuk Kim return BLKS_PER_SEG(sbi);
25626392e9ffSChristoph Hellwig return curseg->next_blkoff;
25636392e9ffSChristoph Hellwig }
25646392e9ffSChristoph Hellwig
25650a8165d7SJaegeuk Kim /*
2566351df4b2SJaegeuk Kim * Calculate the number of current summary pages for writing
2567351df4b2SJaegeuk Kim */
25684d57b86dSChao Yu int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
2569351df4b2SJaegeuk Kim {
2570351df4b2SJaegeuk Kim int valid_sum_count = 0;
25719a47938bSFan Li int i, sum_in_page;
2572351df4b2SJaegeuk Kim
2573351df4b2SJaegeuk Kim for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
25746392e9ffSChristoph Hellwig if (sbi->ckpt->alloc_type[i] != SSR && for_ra)
25756392e9ffSChristoph Hellwig valid_sum_count +=
25766392e9ffSChristoph Hellwig le16_to_cpu(F2FS_CKPT(sbi)->cur_data_blkoff[i]);
2577351df4b2SJaegeuk Kim else
25786392e9ffSChristoph Hellwig valid_sum_count += f2fs_curseg_valid_blocks(sbi, i);
25793fa06d7bSChao Yu }
2580351df4b2SJaegeuk Kim
258109cbfeafSKirill A. Shutemov sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
25829a47938bSFan Li SUM_FOOTER_SIZE) / SUMMARY_SIZE;
25839a47938bSFan Li if (valid_sum_count <= sum_in_page)
2584351df4b2SJaegeuk Kim return 1;
25859a47938bSFan Li else if ((valid_sum_count - sum_in_page) <=
258609cbfeafSKirill A. Shutemov (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
2587351df4b2SJaegeuk Kim return 2;
2588351df4b2SJaegeuk Kim return 3;
2589351df4b2SJaegeuk Kim }
2590351df4b2SJaegeuk Kim
25910a8165d7SJaegeuk Kim /*
2592351df4b2SJaegeuk Kim * Caller should put this summary page
2593351df4b2SJaegeuk Kim */
25944d57b86dSChao Yu struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
2595351df4b2SJaegeuk Kim {
259686f33603SJaegeuk Kim if (unlikely(f2fs_cp_error(sbi)))
259786f33603SJaegeuk Kim return ERR_PTR(-EIO);
259886f33603SJaegeuk Kim return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
2599351df4b2SJaegeuk Kim }
2600351df4b2SJaegeuk Kim
26014d57b86dSChao Yu void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
26024d57b86dSChao Yu void *src, block_t blk_addr)
2603381722d2SChao Yu {
26044d57b86dSChao Yu struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2605381722d2SChao Yu
26060537b811SChao Yu memcpy(page_address(page), src, PAGE_SIZE);
2607381722d2SChao Yu set_page_dirty(page);
2608381722d2SChao Yu f2fs_put_page(page, 1);
2609381722d2SChao Yu }
2610381722d2SChao Yu
2611351df4b2SJaegeuk Kim static void write_sum_page(struct f2fs_sb_info *sbi,
2612351df4b2SJaegeuk Kim struct f2fs_summary_block *sum_blk, block_t blk_addr)
2613351df4b2SJaegeuk Kim {
26144d57b86dSChao Yu f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
2615351df4b2SJaegeuk Kim }
2616351df4b2SJaegeuk Kim
2617b7ad7512SChao Yu static void write_current_sum_page(struct f2fs_sb_info *sbi,
2618b7ad7512SChao Yu int type, block_t blk_addr)
2619b7ad7512SChao Yu {
2620b7ad7512SChao Yu struct curseg_info *curseg = CURSEG_I(sbi, type);
26214d57b86dSChao Yu struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2622b7ad7512SChao Yu struct f2fs_summary_block *src = curseg->sum_blk;
2623b7ad7512SChao Yu struct f2fs_summary_block *dst;
2624b7ad7512SChao Yu
2625b7ad7512SChao Yu dst = (struct f2fs_summary_block *)page_address(page);
262681114baaSChao Yu memset(dst, 0, PAGE_SIZE);
2627b7ad7512SChao Yu
2628b7ad7512SChao Yu mutex_lock(&curseg->curseg_mutex);
2629b7ad7512SChao Yu
2630b7ad7512SChao Yu down_read(&curseg->journal_rwsem);
2631b7ad7512SChao Yu memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
2632b7ad7512SChao Yu up_read(&curseg->journal_rwsem);
2633b7ad7512SChao Yu
2634b7ad7512SChao Yu memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
2635b7ad7512SChao Yu memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
2636b7ad7512SChao Yu
2637b7ad7512SChao Yu mutex_unlock(&curseg->curseg_mutex);
2638b7ad7512SChao Yu
2639b7ad7512SChao Yu set_page_dirty(page);
2640b7ad7512SChao Yu f2fs_put_page(page, 1);
2641b7ad7512SChao Yu }
2642b7ad7512SChao Yu
2643093749e2SChao Yu static int is_next_segment_free(struct f2fs_sb_info *sbi,
2644093749e2SChao Yu struct curseg_info *curseg, int type)
2645a7881893SJaegeuk Kim {
2646a7881893SJaegeuk Kim unsigned int segno = curseg->segno + 1;
2647a7881893SJaegeuk Kim struct free_segmap_info *free_i = FREE_I(sbi);
2648a7881893SJaegeuk Kim
2649f0248ba6SJaegeuk Kim if (segno < MAIN_SEGS(sbi) && segno % SEGS_PER_SEC(sbi))
2650a7881893SJaegeuk Kim return !test_bit(segno, free_i->free_segmap);
2651a7881893SJaegeuk Kim return 0;
2652a7881893SJaegeuk Kim }
2653a7881893SJaegeuk Kim
26540a8165d7SJaegeuk Kim /*
2655351df4b2SJaegeuk Kim * Find a new segment from the free segment bitmap in the right order
2656351df4b2SJaegeuk Kim * This function should always succeed; otherwise it is a BUG
2657351df4b2SJaegeuk Kim */
2658351df4b2SJaegeuk Kim static void get_new_segment(struct f2fs_sb_info *sbi,
265940d76c39SDaeho Jeong unsigned int *newseg, bool new_sec, bool pinning)
2660351df4b2SJaegeuk Kim {
2661351df4b2SJaegeuk Kim struct free_segmap_info *free_i = FREE_I(sbi);
2662351df4b2SJaegeuk Kim unsigned int segno, secno, zoneno;
26637cd8558bSJaegeuk Kim unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
26644ddb1a4dSJaegeuk Kim unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
26654ddb1a4dSJaegeuk Kim unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2666351df4b2SJaegeuk Kim bool init = true;
2667351df4b2SJaegeuk Kim int i;
2668881613a9SZhiguo Niu int ret = 0;
2669351df4b2SJaegeuk Kim
26701a118ccfSChao Yu spin_lock(&free_i->segmap_lock);
2671351df4b2SJaegeuk Kim
2672f0248ba6SJaegeuk Kim if (!new_sec && ((*newseg + 1) % SEGS_PER_SEC(sbi))) {
2673351df4b2SJaegeuk Kim segno = find_next_zero_bit(free_i->free_segmap,
26744ddb1a4dSJaegeuk Kim GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
26754ddb1a4dSJaegeuk Kim if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2676351df4b2SJaegeuk Kim goto got_it;
2677351df4b2SJaegeuk Kim }
267840d76c39SDaeho Jeong
267940d76c39SDaeho Jeong /*
268040d76c39SDaeho Jeong * If we format f2fs on zoned storage, let's try to get pinned sections
268140d76c39SDaeho Jeong * from the beginning of the storage, which should be backed by conventional zones.
268240d76c39SDaeho Jeong */
268340d76c39SDaeho Jeong if (f2fs_sb_has_blkzoned(sbi)) {
268440d76c39SDaeho Jeong segno = pinning ? 0 : max(first_zoned_segno(sbi), *newseg);
268540d76c39SDaeho Jeong hint = GET_SEC_FROM_SEG(sbi, segno);
268640d76c39SDaeho Jeong }
268740d76c39SDaeho Jeong
2688351df4b2SJaegeuk Kim find_other_zone:
26897cd8558bSJaegeuk Kim secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
26907cd8558bSJaegeuk Kim if (secno >= MAIN_SECS(sbi)) {
2691b5c7e7ecSYury Norov secno = find_first_zero_bit(free_i->free_secmap,
2692b5c7e7ecSYury Norov MAIN_SECS(sbi));
2693881613a9SZhiguo Niu if (secno >= MAIN_SECS(sbi)) {
2694881613a9SZhiguo Niu ret = -ENOSPC;
2695881613a9SZhiguo Niu goto out_unlock;
2696881613a9SZhiguo Niu }
2697351df4b2SJaegeuk Kim }
26984ddb1a4dSJaegeuk Kim segno = GET_SEG_FROM_SEC(sbi, secno);
26994ddb1a4dSJaegeuk Kim zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2700351df4b2SJaegeuk Kim
2701351df4b2SJaegeuk Kim /* give up on finding another zone */
2702351df4b2SJaegeuk Kim if (!init)
2703351df4b2SJaegeuk Kim goto got_it;
2704351df4b2SJaegeuk Kim if (sbi->secs_per_zone == 1)
2705351df4b2SJaegeuk Kim goto got_it;
2706351df4b2SJaegeuk Kim if (zoneno == old_zoneno)
2707351df4b2SJaegeuk Kim goto got_it;
2708351df4b2SJaegeuk Kim for (i = 0; i < NR_CURSEG_TYPE; i++)
2709351df4b2SJaegeuk Kim if (CURSEG_I(sbi, i)->zone == zoneno)
2710351df4b2SJaegeuk Kim break;
2711351df4b2SJaegeuk Kim
2712351df4b2SJaegeuk Kim if (i < NR_CURSEG_TYPE) {
2713351df4b2SJaegeuk Kim /* zone is in user, try another */
2714066cec37SJaegeuk Kim if (zoneno + 1 >= total_zones)
2715351df4b2SJaegeuk Kim hint = 0;
2716351df4b2SJaegeuk Kim else
2717351df4b2SJaegeuk Kim hint = (zoneno + 1) * sbi->secs_per_zone;
2718351df4b2SJaegeuk Kim init = false;
2719351df4b2SJaegeuk Kim goto find_other_zone;
2720351df4b2SJaegeuk Kim }
2721351df4b2SJaegeuk Kim got_it:
2722351df4b2SJaegeuk Kim /* set it as dirty segment in free segmap */
27239850cf4aSJaegeuk Kim f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
2724351df4b2SJaegeuk Kim __set_inuse(sbi, segno);
2725351df4b2SJaegeuk Kim *newseg = segno;
2726881613a9SZhiguo Niu out_unlock:
27271a118ccfSChao Yu spin_unlock(&free_i->segmap_lock);
2728881613a9SZhiguo Niu
2729881613a9SZhiguo Niu if (ret) {
2730881613a9SZhiguo Niu f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT);
2731881613a9SZhiguo Niu f2fs_bug_on(sbi, 1);
2732881613a9SZhiguo Niu }
2733351df4b2SJaegeuk Kim }
2734351df4b2SJaegeuk Kim
2735351df4b2SJaegeuk Kim static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2736351df4b2SJaegeuk Kim {
2737351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type);
2738351df4b2SJaegeuk Kim struct summary_footer *sum_footer;
2739093749e2SChao Yu unsigned short seg_type = curseg->seg_type;
2740351df4b2SJaegeuk Kim
2741d0b9e42aSChao Yu curseg->inited = true;
2742351df4b2SJaegeuk Kim curseg->segno = curseg->next_segno;
27434ddb1a4dSJaegeuk Kim curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2744351df4b2SJaegeuk Kim curseg->next_blkoff = 0;
2745351df4b2SJaegeuk Kim curseg->next_segno = NULL_SEGNO;
2746351df4b2SJaegeuk Kim
2747351df4b2SJaegeuk Kim sum_footer = &(curseg->sum_blk->footer);
2748351df4b2SJaegeuk Kim memset(sum_footer, 0, sizeof(struct summary_footer));
2749093749e2SChao Yu
2750093749e2SChao Yu sanity_check_seg_type(sbi, seg_type);
2751093749e2SChao Yu
2752093749e2SChao Yu if (IS_DATASEG(seg_type))
2753351df4b2SJaegeuk Kim SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
2754093749e2SChao Yu if (IS_NODESEG(seg_type))
2755351df4b2SJaegeuk Kim SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
2756093749e2SChao Yu __set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
2757351df4b2SJaegeuk Kim }
2758351df4b2SJaegeuk Kim
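/*
 * Pick the segment number used as the search hint for the next free
 * segment: random in fragmented-segment test mode, the current segment
 * for large sections, otherwise a policy-dependent starting point
 * (last victim, zero for reuse mode, or the current segment).
 */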
27597a20b8a6SJaegeuk Kim static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
27607a20b8a6SJaegeuk Kim {
2761d0b9e42aSChao Yu struct curseg_info *curseg = CURSEG_I(sbi, type);
2762093749e2SChao Yu unsigned short seg_type = curseg->seg_type;
2763093749e2SChao Yu
2764093749e2SChao Yu sanity_check_seg_type(sbi, seg_type);
27656691d940SDaeho Jeong if (f2fs_need_rand_seg(sbi))
2766f0248ba6SJaegeuk Kim return get_random_u32_below(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
2767d0b9e42aSChao Yu
27682c70c5e3SChao Yu if (__is_large_section(sbi))
2769d0b9e42aSChao Yu return curseg->segno;
2770d0b9e42aSChao Yu
2771d0b9e42aSChao Yu /* inmem log may not locate on any segment after mount */
2772d0b9e42aSChao Yu if (!curseg->inited)
2773d0b9e42aSChao Yu return 0;
2774a7881893SJaegeuk Kim
27754354994fSDaniel Rosenberg if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
27764354994fSDaniel Rosenberg return 0;
27774354994fSDaniel Rosenberg
2778066cec37SJaegeuk Kim if (seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type))
27797a20b8a6SJaegeuk Kim return 0;
27807a20b8a6SJaegeuk Kim
2781e066b83cSJaegeuk Kim if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2782e066b83cSJaegeuk Kim return SIT_I(sbi)->last_victim[ALLOC_NEXT];
278307939627SJaegeuk Kim
278407939627SJaegeuk Kim /* find segments from 0 to reuse freed segments */
278563189b78SChao Yu if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
278607939627SJaegeuk Kim return 0;
278707939627SJaegeuk Kim
2788d0b9e42aSChao Yu return curseg->segno;
27897a20b8a6SJaegeuk Kim }
27907a20b8a6SJaegeuk Kim
27910a8165d7SJaegeuk Kim /*
2792351df4b2SJaegeuk Kim * Allocate a current working segment.
2793351df4b2SJaegeuk Kim * This function always allocates a free segment in LFS manner.
2794351df4b2SJaegeuk Kim */
279540d76c39SDaeho Jeong static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2796351df4b2SJaegeuk Kim {
2797351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type);
2798351df4b2SJaegeuk Kim unsigned int segno = curseg->segno;
279940d76c39SDaeho Jeong bool pinning = type == CURSEG_COLD_DATA_PINNED;
2800351df4b2SJaegeuk Kim
2801d0b9e42aSChao Yu if (curseg->inited)
2802066cec37SJaegeuk Kim write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, segno));
280340d76c39SDaeho Jeong
28047a20b8a6SJaegeuk Kim segno = __get_next_segno(sbi, type);
280540d76c39SDaeho Jeong get_new_segment(sbi, &segno, new_sec, pinning);
280640d76c39SDaeho Jeong if (new_sec && pinning &&
280740d76c39SDaeho Jeong !f2fs_valid_pinned_area(sbi, START_BLOCK(sbi, segno))) {
280840d76c39SDaeho Jeong __set_free(sbi, segno);
280940d76c39SDaeho Jeong return -EAGAIN;
281040d76c39SDaeho Jeong }
281140d76c39SDaeho Jeong
2812351df4b2SJaegeuk Kim curseg->next_segno = segno;
2813351df4b2SJaegeuk Kim reset_curseg(sbi, type, 1);
2814351df4b2SJaegeuk Kim curseg->alloc_type = LFS;
28156691d940SDaeho Jeong if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
28166691d940SDaeho Jeong curseg->fragment_remained_chunk =
2817e8a533cbSJason A. Donenfeld get_random_u32_inclusive(1, sbi->max_fragment_chunk);
281840d76c39SDaeho Jeong return 0;
2819351df4b2SJaegeuk Kim }
2820351df4b2SJaegeuk Kim
2821453e2ff8SChao Yu static int __next_free_blkoff(struct f2fs_sb_info *sbi,
2822453e2ff8SChao Yu int segno, block_t start)
2823351df4b2SJaegeuk Kim {
2824453e2ff8SChao Yu struct seg_entry *se = get_seg_entry(sbi, segno);
2825e81c93cfSChangman Lee int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
282660a3b782SJaegeuk Kim unsigned long *target_map = SIT_I(sbi)->tmp_map;
2827e81c93cfSChangman Lee unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2828e81c93cfSChangman Lee unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2829453e2ff8SChao Yu int i;
2830e81c93cfSChangman Lee
2831e81c93cfSChangman Lee for (i = 0; i < entries; i++)
2832e81c93cfSChangman Lee target_map[i] = ckpt_map[i] | cur_map[i];
2833e81c93cfSChangman Lee
2834f0248ba6SJaegeuk Kim return __find_rev_next_zero_bit(target_map, BLKS_PER_SEG(sbi), start);
2835351df4b2SJaegeuk Kim }
2836351df4b2SJaegeuk Kim
28374a209588SChristoph Hellwig static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi,
2838351df4b2SJaegeuk Kim struct curseg_info *seg)
2839351df4b2SJaegeuk Kim {
28404a209588SChristoph Hellwig return __next_free_blkoff(sbi, seg->segno, seg->next_blkoff + 1);
2841351df4b2SJaegeuk Kim }
2842351df4b2SJaegeuk Kim
284361461fc9SChao Yu bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
284461461fc9SChao Yu {
2845f0248ba6SJaegeuk Kim return __next_free_blkoff(sbi, segno, 0) < BLKS_PER_SEG(sbi);
284661461fc9SChao Yu }
284761461fc9SChao Yu
28480a8165d7SJaegeuk Kim /*
2849351df4b2SJaegeuk Kim * This function always allocates a used segment (from the dirty seglist) in SSR
2850351df4b2SJaegeuk Kim * manner, so it should recover the existing segment information of valid blocks
2851351df4b2SJaegeuk Kim */
28525bcd655fSChristoph Hellwig static void change_curseg(struct f2fs_sb_info *sbi, int type)
2853351df4b2SJaegeuk Kim {
2854351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2855351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type);
2856351df4b2SJaegeuk Kim unsigned int new_segno = curseg->next_segno;
2857351df4b2SJaegeuk Kim struct f2fs_summary_block *sum_node;
2858351df4b2SJaegeuk Kim struct page *sum_page;
2859351df4b2SJaegeuk Kim
286067f4c664SYongpeng Yang if (curseg->inited)
28615bcd655fSChristoph Hellwig write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno));
2862093749e2SChao Yu
2863351df4b2SJaegeuk Kim __set_test_and_inuse(sbi, new_segno);
2864351df4b2SJaegeuk Kim
2865351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock);
2866351df4b2SJaegeuk Kim __remove_dirty_segment(sbi, new_segno, PRE);
2867351df4b2SJaegeuk Kim __remove_dirty_segment(sbi, new_segno, DIRTY);
2868351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock);
2869351df4b2SJaegeuk Kim
2870351df4b2SJaegeuk Kim reset_curseg(sbi, type, 1);
2871351df4b2SJaegeuk Kim curseg->alloc_type = SSR;
2872453e2ff8SChao Yu curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0);
2873351df4b2SJaegeuk Kim
28744d57b86dSChao Yu sum_page = f2fs_get_sum_page(sbi, new_segno);
287586f33603SJaegeuk Kim if (IS_ERR(sum_page)) {
287686f33603SJaegeuk Kim /* GC won't be able to use stale summary pages by cp_error */
287786f33603SJaegeuk Kim memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
287886f33603SJaegeuk Kim return;
287986f33603SJaegeuk Kim }
2880351df4b2SJaegeuk Kim sum_node = (struct f2fs_summary_block *)page_address(sum_page);
2881351df4b2SJaegeuk Kim memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
2882351df4b2SJaegeuk Kim f2fs_put_page(sum_page, 1);
2883351df4b2SJaegeuk Kim }
2884351df4b2SJaegeuk Kim
2885093749e2SChao Yu static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2886093749e2SChao Yu int alloc_mode, unsigned long long age);
2887093749e2SChao Yu
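/*
 * Prepare the ATGC log of @type: prefer reusing a partially valid
 * segment via SSR; when no candidate exists, fall back to a fresh
 * cold-data segment allocated in LFS manner.
 */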
2888093749e2SChao Yu static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
2889093749e2SChao Yu int target_type, int alloc_mode,
2890093749e2SChao Yu unsigned long long age)
2891093749e2SChao Yu {
2892093749e2SChao Yu struct curseg_info *curseg = CURSEG_I(sbi, type);
2893093749e2SChao Yu
2894093749e2SChao Yu curseg->seg_type = target_type;
2895093749e2SChao Yu
2896093749e2SChao Yu if (get_ssr_segment(sbi, type, alloc_mode, age)) {
2897093749e2SChao Yu struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
2898093749e2SChao Yu
2899093749e2SChao Yu curseg->seg_type = se->type;
29005bcd655fSChristoph Hellwig change_curseg(sbi, type);
2901093749e2SChao Yu } else {
2902093749e2SChao Yu /* allocate cold segment by default */
2903093749e2SChao Yu curseg->seg_type = CURSEG_COLD_DATA;
2904093749e2SChao Yu new_curseg(sbi, type, true);
2905093749e2SChao Yu }
2906093749e2SChao Yu stat_inc_seg_type(sbi, curseg);
2907093749e2SChao Yu }
2908093749e2SChao Yu
2909093749e2SChao Yu static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
2910093749e2SChao Yu {
2911093749e2SChao Yu struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
2912093749e2SChao Yu
2913093749e2SChao Yu if (!sbi->am.atgc_enabled)
2914093749e2SChao Yu return;
2915093749e2SChao Yu
2916e4544b63STim Murray f2fs_down_read(&SM_I(sbi)->curseg_lock);
2917093749e2SChao Yu
2918093749e2SChao Yu mutex_lock(&curseg->curseg_mutex);
2919093749e2SChao Yu down_write(&SIT_I(sbi)->sentry_lock);
2920093749e2SChao Yu
2921093749e2SChao Yu get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0);
2922093749e2SChao Yu
2923093749e2SChao Yu up_write(&SIT_I(sbi)->sentry_lock);
2924093749e2SChao Yu mutex_unlock(&curseg->curseg_mutex);
2925093749e2SChao Yu
2926e4544b63STim Murray f2fs_up_read(&SM_I(sbi)->curseg_lock);
2927093749e2SChao Yu
2928093749e2SChao Yu }
2929093749e2SChao Yu void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
2930093749e2SChao Yu {
2931093749e2SChao Yu __f2fs_init_atgc_curseg(sbi);
2932093749e2SChao Yu }
2933093749e2SChao Yu
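/*
 * Park an in-memory-only log (pinned / ATGC) across checkpoint: persist
 * its summary block if the segment holds valid blocks, otherwise give
 * the segment back to the free map.
 */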
2934093749e2SChao Yu static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2935d0b9e42aSChao Yu {
2936d0b9e42aSChao Yu struct curseg_info *curseg = CURSEG_I(sbi, type);
2937d0b9e42aSChao Yu
2938d0b9e42aSChao Yu mutex_lock(&curseg->curseg_mutex);
2939d0b9e42aSChao Yu if (!curseg->inited)
2940d0b9e42aSChao Yu goto out;
2941d0b9e42aSChao Yu
2942d0b9e42aSChao Yu if (get_valid_blocks(sbi, curseg->segno, false)) {
2943d0b9e42aSChao Yu write_sum_page(sbi, curseg->sum_blk,
2944d0b9e42aSChao Yu GET_SUM_BLOCK(sbi, curseg->segno));
2945d0b9e42aSChao Yu } else {
2946d0b9e42aSChao Yu mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2947d0b9e42aSChao Yu __set_test_and_free(sbi, curseg->segno, true);
2948d0b9e42aSChao Yu mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2949d0b9e42aSChao Yu }
2950d0b9e42aSChao Yu out:
2951d0b9e42aSChao Yu mutex_unlock(&curseg->curseg_mutex);
2952d0b9e42aSChao Yu }
2953d0b9e42aSChao Yu
2954093749e2SChao Yu void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi)
2955093749e2SChao Yu {
2956093749e2SChao Yu __f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2957093749e2SChao Yu
2958093749e2SChao Yu if (sbi->am.atgc_enabled)
2959093749e2SChao Yu __f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2960093749e2SChao Yu }
2961093749e2SChao Yu
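/*
 * Counterpart of __f2fs_save_inmem_curseg(): after checkpoint, reclaim
 * the parked segment from the free map if it is still empty.
 */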
2962093749e2SChao Yu static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2963d0b9e42aSChao Yu {
2964d0b9e42aSChao Yu struct curseg_info *curseg = CURSEG_I(sbi, type);
2965d0b9e42aSChao Yu
2966d0b9e42aSChao Yu mutex_lock(&curseg->curseg_mutex);
2967d0b9e42aSChao Yu if (!curseg->inited)
2968d0b9e42aSChao Yu goto out;
2969d0b9e42aSChao Yu if (get_valid_blocks(sbi, curseg->segno, false))
2970d0b9e42aSChao Yu goto out;
2971d0b9e42aSChao Yu
2972d0b9e42aSChao Yu mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2973d0b9e42aSChao Yu __set_test_and_inuse(sbi, curseg->segno);
2974d0b9e42aSChao Yu mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2975d0b9e42aSChao Yu out:
2976d0b9e42aSChao Yu mutex_unlock(&curseg->curseg_mutex);
2977d0b9e42aSChao Yu }
2978d0b9e42aSChao Yu
2979093749e2SChao Yu void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi)
2980093749e2SChao Yu {
2981093749e2SChao Yu __f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2982093749e2SChao Yu
2983093749e2SChao Yu if (sbi->am.atgc_enabled)
2984093749e2SChao Yu __f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2985093749e2SChao Yu }
2986093749e2SChao Yu
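/*
 * Find a victim segment with free slots so the log of @type can switch
 * to SSR allocation: try the matching temperature first, then the other
 * logs of the same kind and, with checkpointing disabled, any fully
 * free segment on the dirty list.
 */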
2987093749e2SChao Yu static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2988093749e2SChao Yu int alloc_mode, unsigned long long age)
298943727527SJaegeuk Kim {
299043727527SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type);
2991e066b83cSJaegeuk Kim unsigned segno = NULL_SEGNO;
2992093749e2SChao Yu unsigned short seg_type = curseg->seg_type;
2993d27c3d89SChao Yu int i, cnt;
2994d27c3d89SChao Yu bool reversed = false;
2995c192f7a4SJaegeuk Kim
2996093749e2SChao Yu sanity_check_seg_type(sbi, seg_type);
2997093749e2SChao Yu
29984d57b86dSChao Yu /* f2fs_need_SSR() already forces to do this */
299919e0e21aSYangtao Li if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
3000e066b83cSJaegeuk Kim curseg->next_segno = segno;
3001c192f7a4SJaegeuk Kim return 1;
3002e066b83cSJaegeuk Kim }
300343727527SJaegeuk Kim
300470d625cbSJaegeuk Kim /* For node segments, let's do SSR more intensively */
3005093749e2SChao Yu if (IS_NODESEG(seg_type)) {
3006093749e2SChao Yu if (seg_type >= CURSEG_WARM_NODE) {
3007d27c3d89SChao Yu reversed = true;
3008d27c3d89SChao Yu i = CURSEG_COLD_NODE;
3009d27c3d89SChao Yu } else {
301070d625cbSJaegeuk Kim i = CURSEG_HOT_NODE;
3011d27c3d89SChao Yu }
3012d27c3d89SChao Yu cnt = NR_CURSEG_NODE_TYPE;
3013d27c3d89SChao Yu } else {
3014093749e2SChao Yu if (seg_type >= CURSEG_WARM_DATA) {
3015d27c3d89SChao Yu reversed = true;
3016d27c3d89SChao Yu i = CURSEG_COLD_DATA;
301770d625cbSJaegeuk Kim } else {
301870d625cbSJaegeuk Kim i = CURSEG_HOT_DATA;
3019d27c3d89SChao Yu }
3020d27c3d89SChao Yu cnt = NR_CURSEG_DATA_TYPE;
302170d625cbSJaegeuk Kim }
302243727527SJaegeuk Kim
3023d27c3d89SChao Yu for (; cnt-- > 0; reversed ? i-- : i++) {
3024093749e2SChao Yu if (i == seg_type)
3025c192f7a4SJaegeuk Kim continue;
302619e0e21aSYangtao Li if (!f2fs_get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
3027e066b83cSJaegeuk Kim curseg->next_segno = segno;
302843727527SJaegeuk Kim return 1;
3029c192f7a4SJaegeuk Kim }
3030e066b83cSJaegeuk Kim }
30314354994fSDaniel Rosenberg
30324354994fSDaniel Rosenberg /* find valid_blocks=0 in dirty list */
30334354994fSDaniel Rosenberg if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
30344354994fSDaniel Rosenberg segno = get_free_segment(sbi);
30354354994fSDaniel Rosenberg if (segno != NULL_SEGNO) {
30364354994fSDaniel Rosenberg curseg->next_segno = segno;
30374354994fSDaniel Rosenberg return 1;
30384354994fSDaniel Rosenberg }
30394354994fSDaniel Rosenberg }
304043727527SJaegeuk Kim return 0;
304143727527SJaegeuk Kim }
304243727527SJaegeuk Kim
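/*
 * Decide whether the current log must move to a brand-new segment
 * rather than continuing in place or being reused through SSR.
 */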
30438442d94bSChristoph Hellwig static bool need_new_seg(struct f2fs_sb_info *sbi, int type)
3044351df4b2SJaegeuk Kim {
3045a7881893SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type);
3046a7881893SJaegeuk Kim
30478442d94bSChristoph Hellwig if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
3048093749e2SChao Yu curseg->seg_type == CURSEG_WARM_NODE)
30498442d94bSChristoph Hellwig return true;
30508442d94bSChristoph Hellwig if (curseg->alloc_type == LFS &&
3051093749e2SChao Yu is_next_segment_free(sbi, curseg, type) &&
30524354994fSDaniel Rosenberg likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
30538442d94bSChristoph Hellwig return true;
30548442d94bSChristoph Hellwig if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0))
30558442d94bSChristoph Hellwig return true;
30568442d94bSChristoph Hellwig return false;
3057351df4b2SJaegeuk Kim }
3058351df4b2SJaegeuk Kim
30590ef81833SChao Yu void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
306004f0b2eaSQiuyang Sun unsigned int start, unsigned int end)
306104f0b2eaSQiuyang Sun {
306204f0b2eaSQiuyang Sun struct curseg_info *curseg = CURSEG_I(sbi, type);
306304f0b2eaSQiuyang Sun unsigned int segno;
306404f0b2eaSQiuyang Sun
3065e4544b63STim Murray f2fs_down_read(&SM_I(sbi)->curseg_lock);
306604f0b2eaSQiuyang Sun mutex_lock(&curseg->curseg_mutex);
306704f0b2eaSQiuyang Sun down_write(&SIT_I(sbi)->sentry_lock);
306804f0b2eaSQiuyang Sun
306904f0b2eaSQiuyang Sun segno = CURSEG_I(sbi, type)->segno;
307004f0b2eaSQiuyang Sun if (segno < start || segno > end)
307104f0b2eaSQiuyang Sun goto unlock;
307204f0b2eaSQiuyang Sun
3073093749e2SChao Yu if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
30745bcd655fSChristoph Hellwig change_curseg(sbi, type);
307504f0b2eaSQiuyang Sun else
307604f0b2eaSQiuyang Sun new_curseg(sbi, type, true);
307704f0b2eaSQiuyang Sun
307804f0b2eaSQiuyang Sun stat_inc_seg_type(sbi, curseg);
307904f0b2eaSQiuyang Sun
308004f0b2eaSQiuyang Sun locate_dirty_segment(sbi, segno);
308104f0b2eaSQiuyang Sun unlock:
308204f0b2eaSQiuyang Sun up_write(&SIT_I(sbi)->sentry_lock);
308304f0b2eaSQiuyang Sun
308404f0b2eaSQiuyang Sun if (segno != curseg->segno)
3085dcbb4c10SJoe Perches f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
308604f0b2eaSQiuyang Sun type, segno, curseg->segno);
308704f0b2eaSQiuyang Sun
308804f0b2eaSQiuyang Sun mutex_unlock(&curseg->curseg_mutex);
3089e4544b63STim Murray f2fs_up_read(&SM_I(sbi)->curseg_lock);
309004f0b2eaSQiuyang Sun }
309104f0b2eaSQiuyang Sun
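/*
 * Move the log of @type onto a new segment (or section) unless it is
 * already empty and unused; the old segment is re-evaluated for the
 * dirty list. Returns -EAGAIN if a pinned section cannot be placed.
 */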
309240d76c39SDaeho Jeong static int __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
3093509f1010SChao Yu bool new_sec, bool force)
3094351df4b2SJaegeuk Kim {
3095901d745fSChao Yu struct curseg_info *curseg = CURSEG_I(sbi, type);
30966ae1be13SJaegeuk Kim unsigned int old_segno;
3097901d745fSChao Yu
30982df79573SChristoph Hellwig if (!force && curseg->inited &&
30992df79573SChristoph Hellwig !curseg->next_blkoff &&
31002df79573SChristoph Hellwig !get_valid_blocks(sbi, curseg->segno, new_sec) &&
31012df79573SChristoph Hellwig !get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
310240d76c39SDaeho Jeong return 0;
31032df79573SChristoph Hellwig
3104901d745fSChao Yu old_segno = curseg->segno;
310540d76c39SDaeho Jeong if (new_curseg(sbi, type, true))
310640d76c39SDaeho Jeong return -EAGAIN;
31078442d94bSChristoph Hellwig stat_inc_seg_type(sbi, curseg);
3108901d745fSChao Yu locate_dirty_segment(sbi, old_segno);
310940d76c39SDaeho Jeong return 0;
3110901d745fSChao Yu }
3111901d745fSChao Yu
311240d76c39SDaeho Jeong int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
3113901d745fSChao Yu {
311440d76c39SDaeho Jeong int ret;
311540d76c39SDaeho Jeong
3116e4544b63STim Murray f2fs_down_read(&SM_I(sbi)->curseg_lock);
3117901d745fSChao Yu down_write(&SIT_I(sbi)->sentry_lock);
311840d76c39SDaeho Jeong ret = __allocate_new_segment(sbi, type, true, force);
3119901d745fSChao Yu up_write(&SIT_I(sbi)->sentry_lock);
3120e4544b63STim Murray f2fs_up_read(&SM_I(sbi)->curseg_lock);
312140d76c39SDaeho Jeong
312240d76c39SDaeho Jeong return ret;
312340d76c39SDaeho Jeong }
312440d76c39SDaeho Jeong
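/*
 * Allocate a section for pinned (non-relocatable) data. On zoned
 * devices a failure triggers one round of foreground GC over the first
 * device's range before retrying once.
 */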
312540d76c39SDaeho Jeong int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi)
312640d76c39SDaeho Jeong {
312740d76c39SDaeho Jeong int err;
312840d76c39SDaeho Jeong bool gc_required = true;
312940d76c39SDaeho Jeong
313040d76c39SDaeho Jeong retry:
313140d76c39SDaeho Jeong f2fs_lock_op(sbi);
313240d76c39SDaeho Jeong err = f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
313340d76c39SDaeho Jeong f2fs_unlock_op(sbi);
313440d76c39SDaeho Jeong
313540d76c39SDaeho Jeong if (f2fs_sb_has_blkzoned(sbi) && err && gc_required) {
313640d76c39SDaeho Jeong f2fs_down_write(&sbi->gc_lock);
313740d76c39SDaeho Jeong f2fs_gc_range(sbi, 0, GET_SEGNO(sbi, FDEV(0).end_blk), true, 1);
313840d76c39SDaeho Jeong f2fs_up_write(&sbi->gc_lock);
313940d76c39SDaeho Jeong
314040d76c39SDaeho Jeong gc_required = false;
314140d76c39SDaeho Jeong goto retry;
314240d76c39SDaeho Jeong }
314340d76c39SDaeho Jeong
314440d76c39SDaeho Jeong return err;
3145901d745fSChao Yu }
3146901d745fSChao Yu
3147901d745fSChao Yu void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
3148901d745fSChao Yu {
3149351df4b2SJaegeuk Kim int i;
3150351df4b2SJaegeuk Kim
3151e4544b63STim Murray f2fs_down_read(&SM_I(sbi)->curseg_lock);
31523d26fa6bSChao Yu down_write(&SIT_I(sbi)->sentry_lock);
3153901d745fSChao Yu for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
3154509f1010SChao Yu __allocate_new_segment(sbi, i, false, false);
31553d26fa6bSChao Yu up_write(&SIT_I(sbi)->sentry_lock);
3156e4544b63STim Murray f2fs_up_read(&SM_I(sbi)->curseg_lock);
3157351df4b2SJaegeuk Kim }
3158351df4b2SJaegeuk Kim
31594d57b86dSChao Yu bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
31604d57b86dSChao Yu struct cp_control *cpc)
316125290fa5SJaegeuk Kim {
316225290fa5SJaegeuk Kim __u64 trim_start = cpc->trim_start;
316325290fa5SJaegeuk Kim bool has_candidate = false;
316425290fa5SJaegeuk Kim
31653d26fa6bSChao Yu down_write(&SIT_I(sbi)->sentry_lock);
316625290fa5SJaegeuk Kim for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
316725290fa5SJaegeuk Kim if (add_discard_addrs(sbi, cpc, true)) {
316825290fa5SJaegeuk Kim has_candidate = true;
316925290fa5SJaegeuk Kim break;
317025290fa5SJaegeuk Kim }
317125290fa5SJaegeuk Kim }
31723d26fa6bSChao Yu up_write(&SIT_I(sbi)->sentry_lock);
317325290fa5SJaegeuk Kim
317425290fa5SJaegeuk Kim cpc->trim_start = trim_start;
317525290fa5SJaegeuk Kim return has_candidate;
317625290fa5SJaegeuk Kim }
317725290fa5SJaegeuk Kim
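/*
 * fstrim helper: submit every prepared discard command whose range
 * overlaps [start, end], throttled by dpolicy->max_requests, and return
 * the number of blocks whose discards were waited on.
 */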
317801f9cf6dSChao Yu static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
31799a997188SJaegeuk Kim struct discard_policy *dpolicy,
31809a997188SJaegeuk Kim unsigned int start, unsigned int end)
31819a997188SJaegeuk Kim {
31829a997188SJaegeuk Kim struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
31839a997188SJaegeuk Kim struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
31849a997188SJaegeuk Kim struct rb_node **insert_p = NULL, *insert_parent = NULL;
31859a997188SJaegeuk Kim struct discard_cmd *dc;
31869a997188SJaegeuk Kim struct blk_plug plug;
31879a997188SJaegeuk Kim int issued;
318801f9cf6dSChao Yu unsigned int trimmed = 0;
31899a997188SJaegeuk Kim
31909a997188SJaegeuk Kim next:
31919a997188SJaegeuk Kim issued = 0;
31929a997188SJaegeuk Kim
31939a997188SJaegeuk Kim mutex_lock(&dcc->cmd_lock);
319467fce70bSChao Yu if (unlikely(dcc->rbtree_check))
3195f69475ddSJaegeuk Kim f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
31969a997188SJaegeuk Kim
3197f69475ddSJaegeuk Kim dc = __lookup_discard_cmd_ret(&dcc->root, start,
3198f69475ddSJaegeuk Kim &prev_dc, &next_dc, &insert_p, &insert_parent);
31999a997188SJaegeuk Kim if (!dc)
32009a997188SJaegeuk Kim dc = next_dc;
32019a997188SJaegeuk Kim
32029a997188SJaegeuk Kim blk_start_plug(&plug);
32039a997188SJaegeuk Kim
3204f69475ddSJaegeuk Kim while (dc && dc->di.lstart <= end) {
32059a997188SJaegeuk Kim struct rb_node *node;
32066b9cb124SChao Yu int err = 0;
32079a997188SJaegeuk Kim
3208f69475ddSJaegeuk Kim if (dc->di.len < dpolicy->granularity)
32099a997188SJaegeuk Kim goto skip;
32109a997188SJaegeuk Kim
32119a997188SJaegeuk Kim if (dc->state != D_PREP) {
32129a997188SJaegeuk Kim list_move_tail(&dc->list, &dcc->fstrim_list);
32139a997188SJaegeuk Kim goto skip;
32149a997188SJaegeuk Kim }
32159a997188SJaegeuk Kim
32166b9cb124SChao Yu err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
32179a997188SJaegeuk Kim
321835ec7d57SChao Yu if (issued >= dpolicy->max_requests) {
3219f69475ddSJaegeuk Kim start = dc->di.lstart + dc->di.len;
32209a997188SJaegeuk Kim
32216b9cb124SChao Yu if (err)
32226b9cb124SChao Yu __remove_discard_cmd(sbi, dc);
32236b9cb124SChao Yu
32249a997188SJaegeuk Kim blk_finish_plug(&plug);
32259a997188SJaegeuk Kim mutex_unlock(&dcc->cmd_lock);
322601f9cf6dSChao Yu trimmed += __wait_all_discard_cmd(sbi, NULL);
3227a64239d0SNeilBrown f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
32289a997188SJaegeuk Kim goto next;
32299a997188SJaegeuk Kim }
32309a997188SJaegeuk Kim skip:
32319a997188SJaegeuk Kim node = rb_next(&dc->rb_node);
32326b9cb124SChao Yu if (err)
32336b9cb124SChao Yu __remove_discard_cmd(sbi, dc);
32349a997188SJaegeuk Kim dc = rb_entry_safe(node, struct discard_cmd, rb_node);
32359a997188SJaegeuk Kim
32369a997188SJaegeuk Kim if (fatal_signal_pending(current))
32379a997188SJaegeuk Kim break;
32389a997188SJaegeuk Kim }
32399a997188SJaegeuk Kim
32409a997188SJaegeuk Kim blk_finish_plug(&plug);
32419a997188SJaegeuk Kim mutex_unlock(&dcc->cmd_lock);
324201f9cf6dSChao Yu
324301f9cf6dSChao Yu return trimmed;
32449a997188SJaegeuk Kim }
32459a997188SJaegeuk Kim
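/*
 * FITRIM entry point: write a checkpoint so that invalid blocks in the
 * requested range become discard candidates, then issue and wait for
 * them synchronously unless runtime discard is enabled.
 */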
32464b2fecc8SJaegeuk Kim int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
32474b2fecc8SJaegeuk Kim {
3248f7ef9b83SJaegeuk Kim __u64 start = F2FS_BYTES_TO_BLK(range->start);
3249f7ef9b83SJaegeuk Kim __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
3250377224c4SChao Yu unsigned int start_segno, end_segno;
32518412663dSChao Yu block_t start_block, end_block;
32524b2fecc8SJaegeuk Kim struct cp_control cpc;
325378997b56SChao Yu struct discard_policy dpolicy;
32540ea80512SChao Yu unsigned long long trimmed = 0;
3255c34f42e2SChao Yu int err = 0;
3256b0332a0fSChao Yu bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
32574b2fecc8SJaegeuk Kim
3258836b5a63SJaegeuk Kim if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
32594b2fecc8SJaegeuk Kim return -EINVAL;
32604b2fecc8SJaegeuk Kim
32613f16ecd9SChao Yu if (end < MAIN_BLKADDR(sbi))
32623f16ecd9SChao Yu goto out;
32634b2fecc8SJaegeuk Kim
3264ed214a11SYunlei He if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
3265dcbb4c10SJoe Perches f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
326610f966bbSChao Yu return -EFSCORRUPTED;
3267ed214a11SYunlei He }
3268ed214a11SYunlei He
32694b2fecc8SJaegeuk Kim /* start/end segment number in main_area */
32707cd8558bSJaegeuk Kim start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
32717cd8558bSJaegeuk Kim end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
32727cd8558bSJaegeuk Kim GET_SEGNO(sbi, end);
3273ad6672bbSYunlong Song if (need_align) {
3274f0248ba6SJaegeuk Kim start_segno = rounddown(start_segno, SEGS_PER_SEC(sbi));
3275f0248ba6SJaegeuk Kim end_segno = roundup(end_segno + 1, SEGS_PER_SEC(sbi)) - 1;
3276ad6672bbSYunlong Song }
32778412663dSChao Yu
32784b2fecc8SJaegeuk Kim cpc.reason = CP_DISCARD;
3279836b5a63SJaegeuk Kim cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
3280377224c4SChao Yu cpc.trim_start = start_segno;
3281377224c4SChao Yu cpc.trim_end = end_segno;
3282a66cdd98SJaegeuk Kim
3283a66cdd98SJaegeuk Kim if (sbi->discard_blks == 0)
3284377224c4SChao Yu goto out;
3285bba681cbSJaegeuk Kim
3286e4544b63STim Murray f2fs_down_write(&sbi->gc_lock);
3287eb61c2ccSChao Yu stat_inc_cp_call_count(sbi, TOTAL_CALL);
32884d57b86dSChao Yu err = f2fs_write_checkpoint(sbi, &cpc);
3289e4544b63STim Murray f2fs_up_write(&sbi->gc_lock);
3290e9328353SChao Yu if (err)
3291377224c4SChao Yu goto out;
32928412663dSChao Yu
3293e555da9fSJaegeuk Kim /*
3294e555da9fSJaegeuk Kim * We filed discard candidates, but actually we don't need to wait for
3295e555da9fSJaegeuk Kim * all of them, since they'll be issued in idle time along with runtime
3296e555da9fSJaegeuk Kim * discard option. The user configuration is expected to rely on runtime
3297e555da9fSJaegeuk Kim * discard or periodic fstrim instead of waiting here.
3298e555da9fSJaegeuk Kim */
32997d20c8abSChao Yu if (f2fs_realtime_discard_enable(sbi))
33005a615492SJaegeuk Kim goto out;
33015a615492SJaegeuk Kim
33025a615492SJaegeuk Kim start_block = START_BLOCK(sbi, start_segno);
33035a615492SJaegeuk Kim end_block = START_BLOCK(sbi, end_segno + 1);
33045a615492SJaegeuk Kim
33055a615492SJaegeuk Kim __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
330601f9cf6dSChao Yu trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
330701f9cf6dSChao Yu start_block, end_block);
33085a615492SJaegeuk Kim
330901f9cf6dSChao Yu trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
33100ea80512SChao Yu start_block, end_block);
3311377224c4SChao Yu out:
33126eae2694SChao Yu if (!err)
33136eae2694SChao Yu range->len = F2FS_BLK_TO_BYTES(trimmed);
3314c34f42e2SChao Yu return err;
33154b2fecc8SJaegeuk Kim }
33164b2fecc8SJaegeuk Kim
33174d57b86dSChao Yu int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
33184f0a03d3SHyunchul Lee {
33194f0a03d3SHyunchul Lee switch (hint) {
33204f0a03d3SHyunchul Lee case WRITE_LIFE_SHORT:
33214f0a03d3SHyunchul Lee return CURSEG_HOT_DATA;
33224f0a03d3SHyunchul Lee case WRITE_LIFE_EXTREME:
33234f0a03d3SHyunchul Lee return CURSEG_COLD_DATA;
33244f0a03d3SHyunchul Lee default:
33254f0a03d3SHyunchul Lee return CURSEG_WARM_DATA;
33264f0a03d3SHyunchul Lee }
33274f0a03d3SHyunchul Lee }
33284f0a03d3SHyunchul Lee
332981377bd6SJaegeuk Kim static int __get_segment_type_2(struct f2fs_io_info *fio)
3330351df4b2SJaegeuk Kim {
333181377bd6SJaegeuk Kim if (fio->type == DATA)
3332351df4b2SJaegeuk Kim return CURSEG_HOT_DATA;
3333351df4b2SJaegeuk Kim else
3334351df4b2SJaegeuk Kim return CURSEG_HOT_NODE;
3335351df4b2SJaegeuk Kim }
3336351df4b2SJaegeuk Kim
333781377bd6SJaegeuk Kim static int __get_segment_type_4(struct f2fs_io_info *fio)
3338351df4b2SJaegeuk Kim {
333981377bd6SJaegeuk Kim if (fio->type == DATA) {
334081377bd6SJaegeuk Kim struct inode *inode = fio->page->mapping->host;
3341351df4b2SJaegeuk Kim
3342351df4b2SJaegeuk Kim if (S_ISDIR(inode->i_mode))
3343351df4b2SJaegeuk Kim return CURSEG_HOT_DATA;
3344351df4b2SJaegeuk Kim else
3345351df4b2SJaegeuk Kim return CURSEG_COLD_DATA;
3346351df4b2SJaegeuk Kim } else {
334781377bd6SJaegeuk Kim if (IS_DNODE(fio->page) && is_cold_node(fio->page))
3348a344b9fdSJaegeuk Kim return CURSEG_WARM_NODE;
3349351df4b2SJaegeuk Kim else
3350351df4b2SJaegeuk Kim return CURSEG_COLD_NODE;
3351351df4b2SJaegeuk Kim }
3352351df4b2SJaegeuk Kim }
3353351df4b2SJaegeuk Kim
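/*
 * Map the block age recorded in the extent cache for @pgofs onto a data
 * temperature; NO_CHECK_TYPE means no age hint is available.
 */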
335471644dffSJaegeuk Kim static int __get_age_segment_type(struct inode *inode, pgoff_t pgofs)
335571644dffSJaegeuk Kim {
335671644dffSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3357fe59109aSJaegeuk Kim struct extent_info ei = {};
335871644dffSJaegeuk Kim
335971644dffSJaegeuk Kim if (f2fs_lookup_age_extent_cache(inode, pgofs, &ei)) {
336071644dffSJaegeuk Kim if (!ei.age)
336171644dffSJaegeuk Kim return NO_CHECK_TYPE;
336271644dffSJaegeuk Kim if (ei.age <= sbi->hot_data_age_threshold)
336371644dffSJaegeuk Kim return CURSEG_HOT_DATA;
336471644dffSJaegeuk Kim if (ei.age <= sbi->warm_data_age_threshold)
336571644dffSJaegeuk Kim return CURSEG_WARM_DATA;
336671644dffSJaegeuk Kim return CURSEG_COLD_DATA;
336771644dffSJaegeuk Kim }
336871644dffSJaegeuk Kim return NO_CHECK_TYPE;
336971644dffSJaegeuk Kim }
337071644dffSJaegeuk Kim
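/*
 * Full temperature classification used with six active logs: pinned
 * writes, GC/ATGC writes, file temperature flags, compression and the
 * age hint all feed the choice of data log; node pages are split by
 * dnode/cold flags.
 */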
337181377bd6SJaegeuk Kim static int __get_segment_type_6(struct f2fs_io_info *fio)
3372351df4b2SJaegeuk Kim {
337381377bd6SJaegeuk Kim if (fio->type == DATA) {
337481377bd6SJaegeuk Kim struct inode *inode = fio->page->mapping->host;
337571644dffSJaegeuk Kim int type;
3376351df4b2SJaegeuk Kim
3377859fca6bSChao Yu if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
3378859fca6bSChao Yu return CURSEG_COLD_DATA_PINNED;
3379859fca6bSChao Yu
3380b763f3beSChao Yu if (page_private_gcing(fio->page)) {
3381ac2d750bSWeichao Guo if (fio->sbi->am.atgc_enabled &&
3382ac2d750bSWeichao Guo (fio->io_type == FS_DATA_IO) &&
3383f911be11SZhiguo Niu (fio->sbi->gc_mode != GC_URGENT_HIGH) &&
33844239571cSJaegeuk Kim __is_valid_data_blkaddr(fio->old_blkaddr) &&
3385f911be11SZhiguo Niu !is_inode_flag_set(inode, FI_OPU_WRITE))
3386093749e2SChao Yu return CURSEG_ALL_DATA_ATGC;
3387093749e2SChao Yu else
3388093749e2SChao Yu return CURSEG_COLD_DATA;
3389093749e2SChao Yu }
3390602a16d5SDaeho Jeong if (file_is_cold(inode) || f2fs_need_compress_data(inode))
3391351df4b2SJaegeuk Kim return CURSEG_COLD_DATA;
339271644dffSJaegeuk Kim
339371644dffSJaegeuk Kim type = __get_age_segment_type(inode, fio->page->index);
339471644dffSJaegeuk Kim if (type != NO_CHECK_TYPE)
339571644dffSJaegeuk Kim return type;
339671644dffSJaegeuk Kim
3397b6a06cbbSChao Yu if (file_is_hot(inode) ||
3398b4c3ca8bSChao Yu is_inode_flag_set(inode, FI_HOT_DATA) ||
33994a2c5b79SYe Bin f2fs_is_cow_file(inode))
3400ef095d19SJaegeuk Kim return CURSEG_HOT_DATA;
34014d57b86dSChao Yu return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
3402351df4b2SJaegeuk Kim } else {
340381377bd6SJaegeuk Kim if (IS_DNODE(fio->page))
340481377bd6SJaegeuk Kim return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
3405351df4b2SJaegeuk Kim CURSEG_HOT_NODE;
3406351df4b2SJaegeuk Kim return CURSEG_COLD_NODE;
3407351df4b2SJaegeuk Kim }
3408351df4b2SJaegeuk Kim }
3409351df4b2SJaegeuk Kim
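/*
 * Map @fio to a log type according to the number of active logs (2, 4 or 6)
 * configured at mount time, and derive fio->temp (HOT/WARM/COLD) from the
 * chosen type.
 */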
341081377bd6SJaegeuk Kim static int __get_segment_type(struct f2fs_io_info *fio)
3411351df4b2SJaegeuk Kim {
3412a912b54dSJaegeuk Kim int type = 0;
3413a912b54dSJaegeuk Kim
341463189b78SChao Yu switch (F2FS_OPTION(fio->sbi).active_logs) {
3415351df4b2SJaegeuk Kim case 2:
3416a912b54dSJaegeuk Kim type = __get_segment_type_2(fio);
3417a912b54dSJaegeuk Kim break;
3418351df4b2SJaegeuk Kim case 4:
3419a912b54dSJaegeuk Kim type = __get_segment_type_4(fio);
3420a912b54dSJaegeuk Kim break;
3421a912b54dSJaegeuk Kim case 6:
3422a912b54dSJaegeuk Kim type = __get_segment_type_6(fio);
3423a912b54dSJaegeuk Kim break;
3424a912b54dSJaegeuk Kim default:
3425a912b54dSJaegeuk Kim f2fs_bug_on(fio->sbi, true);
3426351df4b2SJaegeuk Kim }
342781377bd6SJaegeuk Kim
3428a912b54dSJaegeuk Kim if (IS_HOT(type))
3429a912b54dSJaegeuk Kim fio->temp = HOT;
3430a912b54dSJaegeuk Kim else if (IS_WARM(type))
3431a912b54dSJaegeuk Kim fio->temp = WARM;
3432a912b54dSJaegeuk Kim else
3433a912b54dSJaegeuk Kim fio->temp = COLD;
3434a912b54dSJaegeuk Kim return type;
3435351df4b2SJaegeuk Kim }
3436351df4b2SJaegeuk Kim
34374a209588SChristoph Hellwig static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi,
34384a209588SChristoph Hellwig struct curseg_info *seg)
34394a209588SChristoph Hellwig {
34404a209588SChristoph Hellwig /* To allocate block chunks of different sizes, use a random number */
34414a209588SChristoph Hellwig if (--seg->fragment_remained_chunk > 0)
34424a209588SChristoph Hellwig return;
34434a209588SChristoph Hellwig
34444a209588SChristoph Hellwig seg->fragment_remained_chunk =
34454a209588SChristoph Hellwig get_random_u32_inclusive(1, sbi->max_fragment_chunk);
34464a209588SChristoph Hellwig seg->next_blkoff +=
34474a209588SChristoph Hellwig get_random_u32_inclusive(1, sbi->max_fragment_hole);
34484a209588SChristoph Hellwig }
34494a209588SChristoph Hellwig
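/*
 * Allocate the next block of the current segment of @type for @page:
 * under curseg_mutex and the SIT sentry lock, record the summary entry,
 * advance next_blkoff (SSR lookup or sequential, optionally fragmented),
 * update segment mtime and SIT counts for the old and new block, open a
 * new segment once the current one is full, and, when @fio is given,
 * queue it on the per-temperature write list for ordered submission.
 */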
34504d57b86dSChao Yu void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3451351df4b2SJaegeuk Kim block_t old_blkaddr, block_t *new_blkaddr,
3452fb830fc5SChao Yu struct f2fs_summary *sum, int type,
3453093749e2SChao Yu struct f2fs_io_info *fio)
3454351df4b2SJaegeuk Kim {
3455351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi);
34566ae1be13SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type);
3457c5d02785SChao Yu unsigned long long old_mtime;
3458093749e2SChao Yu bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
3459093749e2SChao Yu struct seg_entry *se = NULL;
346088c9edfdSChristoph Hellwig bool segment_full = false;
3461351df4b2SJaegeuk Kim
3462e4544b63STim Murray f2fs_down_read(&SM_I(sbi)->curseg_lock);
34632b60311dSChao Yu
3464351df4b2SJaegeuk Kim mutex_lock(&curseg->curseg_mutex);
34653d26fa6bSChao Yu down_write(&sit_i->sentry_lock);
3466351df4b2SJaegeuk Kim
3467093749e2SChao Yu if (from_gc) {
3468093749e2SChao Yu f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
3469093749e2SChao Yu se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
3470093749e2SChao Yu sanity_check_seg_type(sbi, se->type);
3471093749e2SChao Yu f2fs_bug_on(sbi, IS_NODESEG(se->type));
3472093749e2SChao Yu }
3473351df4b2SJaegeuk Kim *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3474351df4b2SJaegeuk Kim
3475f0248ba6SJaegeuk Kim f2fs_bug_on(sbi, curseg->next_blkoff >= BLKS_PER_SEG(sbi));
3476093749e2SChao Yu
34774e6a8d9bSJaegeuk Kim f2fs_wait_discard_bio(sbi, *new_blkaddr);
34784e6a8d9bSJaegeuk Kim
34792163a691SChristoph Hellwig curseg->sum_blk->entries[curseg->next_blkoff] = *sum;
34804a209588SChristoph Hellwig if (curseg->alloc_type == SSR) {
34814a209588SChristoph Hellwig curseg->next_blkoff = f2fs_find_next_ssr_block(sbi, curseg);
34824a209588SChristoph Hellwig } else {
34834a209588SChristoph Hellwig curseg->next_blkoff++;
34844a209588SChristoph Hellwig if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
34854a209588SChristoph Hellwig f2fs_randomize_chunk(sbi, curseg);
34864a209588SChristoph Hellwig }
348788c9edfdSChristoph Hellwig if (curseg->next_blkoff >= f2fs_usable_blks_in_seg(sbi, curseg->segno))
348888c9edfdSChristoph Hellwig segment_full = true;
3489dcdfff65SJaegeuk Kim stat_inc_block_count(sbi, curseg);
3490351df4b2SJaegeuk Kim
3491c5d02785SChao Yu if (from_gc) {
3492c5d02785SChao Yu old_mtime = get_segment_mtime(sbi, old_blkaddr);
3493c5d02785SChao Yu } else {
3494c5d02785SChao Yu update_segment_mtime(sbi, old_blkaddr, 0);
3495c5d02785SChao Yu old_mtime = 0;
3496c5d02785SChao Yu }
3497c5d02785SChao Yu update_segment_mtime(sbi, *new_blkaddr, old_mtime);
3498c5d02785SChao Yu
349965f1b80bSYunlong Song /*
350065f1b80bSYunlong Song * SIT information should be updated before segment allocation,
350165f1b80bSYunlong Song * since SSR needs latest valid block information.
350265f1b80bSYunlong Song */
350365f1b80bSYunlong Song update_sit_entry(sbi, *new_blkaddr, 1);
350465f1b80bSYunlong Song update_sit_entry(sbi, old_blkaddr, -1);
350565f1b80bSYunlong Song
35068442d94bSChristoph Hellwig /*
350788c9edfdSChristoph Hellwig * If the current segment is full, flush it out and replace it with a
350888c9edfdSChristoph Hellwig * new segment.
35098442d94bSChristoph Hellwig */
351088c9edfdSChristoph Hellwig if (segment_full) {
351140d76c39SDaeho Jeong if (type == CURSEG_COLD_DATA_PINNED &&
35124647876eSDaeho Jeong !((curseg->segno + 1) % sbi->segs_per_sec)) {
35134647876eSDaeho Jeong write_sum_page(sbi, curseg->sum_blk,
35144647876eSDaeho Jeong GET_SUM_BLOCK(sbi, curseg->segno));
351540d76c39SDaeho Jeong goto skip_new_segment;
35164647876eSDaeho Jeong }
351740d76c39SDaeho Jeong
35188442d94bSChristoph Hellwig if (from_gc) {
3519093749e2SChao Yu get_atssr_segment(sbi, type, se->type,
3520093749e2SChao Yu AT_SSR, se->mtime);
35218442d94bSChristoph Hellwig } else {
35228442d94bSChristoph Hellwig if (need_new_seg(sbi, type))
35238442d94bSChristoph Hellwig new_curseg(sbi, type, false);
3524093749e2SChao Yu else
35255bcd655fSChristoph Hellwig change_curseg(sbi, type);
35268442d94bSChristoph Hellwig stat_inc_seg_type(sbi, curseg);
35278442d94bSChristoph Hellwig }
3528093749e2SChao Yu }
352940d76c39SDaeho Jeong
353040d76c39SDaeho Jeong skip_new_segment:
3531c6f82fe9SJaegeuk Kim /*
353265f1b80bSYunlong Song * segment dirty status should be updated after segment allocation,
353365f1b80bSYunlong Song * so we only need to update the status once, after the previous
353465f1b80bSYunlong Song * segment has been closed.
3535c6f82fe9SJaegeuk Kim */
353665f1b80bSYunlong Song locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
353765f1b80bSYunlong Song locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
35383436c4bdSYunlong Song
3539e690dddaSChao Yu if (IS_DATASEG(curseg->seg_type))
354071644dffSJaegeuk Kim atomic64_inc(&sbi->allocated_data_blocks);
354171644dffSJaegeuk Kim
35423d26fa6bSChao Yu up_write(&sit_i->sentry_lock);
3543351df4b2SJaegeuk Kim
3544e690dddaSChao Yu if (page && IS_NODESEG(curseg->seg_type)) {
3545351df4b2SJaegeuk Kim fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
3546351df4b2SJaegeuk Kim
3547704956ecSChao Yu f2fs_inode_chksum_set(sbi, page);
3548704956ecSChao Yu }
3549704956ecSChao Yu
3550f608c38cSChao Yu if (fio) {
3551fb830fc5SChao Yu struct f2fs_bio_info *io;
3552fb830fc5SChao Yu
3553fb830fc5SChao Yu INIT_LIST_HEAD(&fio->list);
35542eae077eSChao Yu fio->in_list = 1;
3555fb830fc5SChao Yu io = sbi->write_io[fio->type] + fio->temp;
3556fb830fc5SChao Yu spin_lock(&io->io_lock);
3557fb830fc5SChao Yu list_add_tail(&fio->list, &io->io_list);
3558fb830fc5SChao Yu spin_unlock(&io->io_lock);
3559fb830fc5SChao Yu }
3560fb830fc5SChao Yu
3561bfad7c2dSJaegeuk Kim mutex_unlock(&curseg->curseg_mutex);
35622b60311dSChao Yu
3563e4544b63STim Murray f2fs_up_read(&SM_I(sbi)->curseg_lock);
3564bfad7c2dSJaegeuk Kim }
3565bfad7c2dSJaegeuk Kim
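/*
 * On a multi-device volume, mark every device touched by the extent
 * [@blkaddr, @blkaddr + @blkcnt) as dirty for @ino so that both fsync
 * and the next checkpoint know which devices need flushing.
 */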
356671f2c820SChao Yu void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
356771f2c820SChao Yu block_t blkaddr, unsigned int blkcnt)
356839d787beSChao Yu {
35690916878dSDamien Le Moal if (!f2fs_is_multi_device(sbi))
357039d787beSChao Yu return;
357139d787beSChao Yu
357271f2c820SChao Yu while (1) {
357371f2c820SChao Yu unsigned int devidx = f2fs_target_device_index(sbi, blkaddr);
357471f2c820SChao Yu unsigned int blks = FDEV(devidx).end_blk - blkaddr + 1;
357539d787beSChao Yu
357639d787beSChao Yu /* update device state for fsync */
357771f2c820SChao Yu f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO);
35781228b482SChao Yu
35791228b482SChao Yu /* update device state for checkpoint */
35801228b482SChao Yu if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
35811228b482SChao Yu spin_lock(&sbi->dev_lock);
35821228b482SChao Yu f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
35831228b482SChao Yu spin_unlock(&sbi->dev_lock);
35841228b482SChao Yu }
358571f2c820SChao Yu
358671f2c820SChao Yu if (blkcnt <= blks)
358771f2c820SChao Yu break;
358871f2c820SChao Yu blkcnt -= blks;
358971f2c820SChao Yu blkaddr += blks;
359071f2c820SChao Yu }
359139d787beSChao Yu }
359239d787beSChao Yu
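/*
 * Out-of-place write helper: choose the log type, allocate a new block for
 * the page, drop any stale internal cache entry for the old block, submit
 * the write, and record the target device as dirty.  In LFS mode, cold
 * data writes hold io_order_lock so that block allocation and bio
 * submission stay paired.
 */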
359305ca3632SJaegeuk Kim static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
3594bfad7c2dSJaegeuk Kim {
359581377bd6SJaegeuk Kim int type = __get_segment_type(fio);
3596b0332a0fSChao Yu bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
3597bfad7c2dSJaegeuk Kim
3598107a805dSChao Yu if (keep_order)
3599e4544b63STim Murray f2fs_down_read(&fio->sbi->io_order_lock);
36005f8e5a09SJaegeuk Kim
36014d57b86dSChao Yu f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
3602093749e2SChao Yu &fio->new_blkaddr, sum, type, fio);
3603cfd217f6SChao Yu if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
3604cfd217f6SChao Yu f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr);
3605bfad7c2dSJaegeuk Kim
3606351df4b2SJaegeuk Kim /* write out the dirty page to the block device */
3607fe16efe6SChao Yu f2fs_submit_page_write(fio);
3608fe16efe6SChao Yu
360971f2c820SChao Yu f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
3610fe16efe6SChao Yu
3611107a805dSChao Yu if (keep_order)
3612e4544b63STim Murray f2fs_up_read(&fio->sbi->io_order_lock);
3613351df4b2SJaegeuk Kim }
3614351df4b2SJaegeuk Kim
36154d57b86dSChao Yu void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3616b0af6d49SChao Yu enum iostat_type io_type)
3617351df4b2SJaegeuk Kim {
3618458e6197SJaegeuk Kim struct f2fs_io_info fio = {
361905ca3632SJaegeuk Kim .sbi = sbi,
3620458e6197SJaegeuk Kim .type = META,
36210cdd3195SHyunchul Lee .temp = HOT,
362204d328deSMike Christie .op = REQ_OP_WRITE,
362370fd7614SChristoph Hellwig .op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
36247a9d7548SChao Yu .old_blkaddr = page->index,
36257a9d7548SChao Yu .new_blkaddr = page->index,
362605ca3632SJaegeuk Kim .page = page,
36274375a336SJaegeuk Kim .encrypted_page = NULL,
36282eae077eSChao Yu .in_list = 0,
3629458e6197SJaegeuk Kim };
3630458e6197SJaegeuk Kim
36312b947003SChao Yu if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
363204d328deSMike Christie fio.op_flags &= ~REQ_META;
36332b947003SChao Yu
3634351df4b2SJaegeuk Kim set_page_writeback(page);
3635b9109b0eSJaegeuk Kim f2fs_submit_page_write(&fio);
3636b0af6d49SChao Yu
3637b63e7be5SChao Yu stat_inc_meta_count(sbi, page->index);
363834a23525SChao Yu f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE);
3639351df4b2SJaegeuk Kim }
3640351df4b2SJaegeuk Kim
36414d57b86dSChao Yu void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
3642351df4b2SJaegeuk Kim {
3643351df4b2SJaegeuk Kim struct f2fs_summary sum;
364405ca3632SJaegeuk Kim
3645351df4b2SJaegeuk Kim set_summary(&sum, nid, 0, 0);
364605ca3632SJaegeuk Kim do_write_page(&sum, fio);
3647b0af6d49SChao Yu
364834a23525SChao Yu f2fs_update_iostat(fio->sbi, NULL, fio->io_type, F2FS_BLKSIZE);
3649351df4b2SJaegeuk Kim }
3650351df4b2SJaegeuk Kim
36514d57b86dSChao Yu void f2fs_outplace_write_data(struct dnode_of_data *dn,
36524d57b86dSChao Yu struct f2fs_io_info *fio)
3653351df4b2SJaegeuk Kim {
365405ca3632SJaegeuk Kim struct f2fs_sb_info *sbi = fio->sbi;
3655351df4b2SJaegeuk Kim struct f2fs_summary sum;
3656351df4b2SJaegeuk Kim
36579850cf4aSJaegeuk Kim f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
365871644dffSJaegeuk Kim if (fio->io_type == FS_DATA_IO || fio->io_type == FS_CP_DATA_IO)
365971644dffSJaegeuk Kim f2fs_update_age_extent_cache(dn);
36607735730dSChao Yu set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
366105ca3632SJaegeuk Kim do_write_page(&sum, fio);
3662f28b3434SChao Yu f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
3663b0af6d49SChao Yu
366434a23525SChao Yu f2fs_update_iostat(sbi, dn->inode, fio->io_type, F2FS_BLKSIZE);
3665351df4b2SJaegeuk Kim }
3666351df4b2SJaegeuk Kim
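/*
 * In-place update path: reuse the existing block address, verify that it
 * really belongs to a data segment (otherwise flag the fs for fsck and fail
 * with -EFSCORRUPTED), bail out on a checkpoint error, and submit the page
 * either merged into the cached bio or as a new one.  On early failure any
 * caller-provided cached bio is ended with an I/O error.
 */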
36674d57b86dSChao Yu int f2fs_inplace_write_data(struct f2fs_io_info *fio)
3668351df4b2SJaegeuk Kim {
3669b0af6d49SChao Yu int err;
3670d21b0f23SYunlei He struct f2fs_sb_info *sbi = fio->sbi;
367105573d6cSChao Yu unsigned int segno;
3672b0af6d49SChao Yu
36737a9d7548SChao Yu fio->new_blkaddr = fio->old_blkaddr;
36740cdd3195SHyunchul Lee /* i/o temperature is needed for passing down write hints */
36750cdd3195SHyunchul Lee __get_segment_type(fio);
3676d21b0f23SYunlei He
367705573d6cSChao Yu segno = GET_SEGNO(sbi, fio->new_blkaddr);
367805573d6cSChao Yu
367905573d6cSChao Yu if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
368005573d6cSChao Yu set_sbi_flag(sbi, SBI_NEED_FSCK);
36812d821c12SChao Yu f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
36822d821c12SChao Yu __func__, segno);
368395577278SChao Yu err = -EFSCORRUPTED;
368495fa90c9SChao Yu f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
368595577278SChao Yu goto drop_bio;
368695577278SChao Yu }
368795577278SChao Yu
36881ffc8f5fSJaegeuk Kim if (f2fs_cp_error(sbi)) {
368995577278SChao Yu err = -EIO;
369095577278SChao Yu goto drop_bio;
369105573d6cSChao Yu }
3692d21b0f23SYunlei He
3693271fda62SSunmin Jeong if (fio->meta_gc)
3694c92f2927SChao Yu f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1);
3695e3b49ea3SHyeong-Jun Kim
369605ca3632SJaegeuk Kim stat_inc_inplace_blocks(fio->sbi);
3697b0af6d49SChao Yu
3698fdb7ccc3SYangtao Li if (fio->bio && !IS_F2FS_IPU_NOCACHE(sbi))
36998648de2cSChao Yu err = f2fs_merge_page_bio(fio);
37008648de2cSChao Yu else
3701b0af6d49SChao Yu err = f2fs_submit_page_bio(fio);
3702e46f6bd8SChao Yu if (!err) {
370371f2c820SChao Yu f2fs_update_device_state(fio->sbi, fio->ino,
370471f2c820SChao Yu fio->new_blkaddr, 1);
370534a23525SChao Yu f2fs_update_iostat(fio->sbi, fio->page->mapping->host,
370634a23525SChao Yu fio->io_type, F2FS_BLKSIZE);
3707e46f6bd8SChao Yu }
3708b0af6d49SChao Yu
3709b0af6d49SChao Yu return err;
371095577278SChao Yu drop_bio:
3711349c4d6cSJaegeuk Kim if (fio->bio && *(fio->bio)) {
371295577278SChao Yu struct bio *bio = *(fio->bio);
371395577278SChao Yu
371495577278SChao Yu bio->bi_status = BLK_STS_IOERR;
371595577278SChao Yu bio_endio(bio);
3716349c4d6cSJaegeuk Kim *(fio->bio) = NULL;
371795577278SChao Yu }
371895577278SChao Yu return err;
3719351df4b2SJaegeuk Kim }
3720351df4b2SJaegeuk Kim
37212b60311dSChao Yu static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
37222b60311dSChao Yu unsigned int segno)
37232b60311dSChao Yu {
37242b60311dSChao Yu int i;
37252b60311dSChao Yu
37262b60311dSChao Yu for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
37272b60311dSChao Yu if (CURSEG_I(sbi, i)->segno == segno)
37282b60311dSChao Yu break;
37292b60311dSChao Yu }
37302b60311dSChao Yu return i;
37312b60311dSChao Yu }
37322b60311dSChao Yu
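/*
 * Rewire block accounting when a block is moved outside the normal
 * allocation path (recovery or GC): switch the relevant curseg onto the
 * segment owning @new_blkaddr, install @sum at the block's offset, update
 * SIT counts and mtime for both addresses, and optionally roll the curseg
 * back to its previous position when @recover_curseg is set.
 */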
37334d57b86dSChao Yu void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
373419f106bcSChao Yu block_t old_blkaddr, block_t new_blkaddr,
3735c5d02785SChao Yu bool recover_curseg, bool recover_newaddr,
3736c5d02785SChao Yu bool from_gc)
3737351df4b2SJaegeuk Kim {
3738351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi);
3739351df4b2SJaegeuk Kim struct curseg_info *curseg;
3740351df4b2SJaegeuk Kim unsigned int segno, old_cursegno;
3741351df4b2SJaegeuk Kim struct seg_entry *se;
3742351df4b2SJaegeuk Kim int type;
374319f106bcSChao Yu unsigned short old_blkoff;
3744753a8ed0SWang Xiaojun unsigned char old_alloc_type;
3745351df4b2SJaegeuk Kim
3746351df4b2SJaegeuk Kim segno = GET_SEGNO(sbi, new_blkaddr);
3747351df4b2SJaegeuk Kim se = get_seg_entry(sbi, segno);
3748351df4b2SJaegeuk Kim type = se->type;
3749351df4b2SJaegeuk Kim
3750e4544b63STim Murray f2fs_down_write(&SM_I(sbi)->curseg_lock);
37512b60311dSChao Yu
375219f106bcSChao Yu if (!recover_curseg) {
375319f106bcSChao Yu /* for recovery flow */
3754351df4b2SJaegeuk Kim if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
3755351df4b2SJaegeuk Kim if (old_blkaddr == NULL_ADDR)
3756351df4b2SJaegeuk Kim type = CURSEG_COLD_DATA;
3757351df4b2SJaegeuk Kim else
3758351df4b2SJaegeuk Kim type = CURSEG_WARM_DATA;
3759351df4b2SJaegeuk Kim }
376019f106bcSChao Yu } else {
37612b60311dSChao Yu if (IS_CURSEG(sbi, segno)) {
37622b60311dSChao Yu /* se->type is volatile as SSR allocation */
37632b60311dSChao Yu type = __f2fs_get_curseg(sbi, segno);
37642b60311dSChao Yu f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
37652b60311dSChao Yu } else {
376619f106bcSChao Yu type = CURSEG_WARM_DATA;
376719f106bcSChao Yu }
37682b60311dSChao Yu }
376919f106bcSChao Yu
3770351df4b2SJaegeuk Kim curseg = CURSEG_I(sbi, type);
3771f3d586b7SLongPing Wei f2fs_bug_on(sbi, !IS_DATASEG(curseg->seg_type));
3772351df4b2SJaegeuk Kim
3773351df4b2SJaegeuk Kim mutex_lock(&curseg->curseg_mutex);
37743d26fa6bSChao Yu down_write(&sit_i->sentry_lock);
3775351df4b2SJaegeuk Kim
3776351df4b2SJaegeuk Kim old_cursegno = curseg->segno;
377719f106bcSChao Yu old_blkoff = curseg->next_blkoff;
3778753a8ed0SWang Xiaojun old_alloc_type = curseg->alloc_type;
3779351df4b2SJaegeuk Kim
3780351df4b2SJaegeuk Kim /* change the current segment */
3781351df4b2SJaegeuk Kim if (segno != curseg->segno) {
3782351df4b2SJaegeuk Kim curseg->next_segno = segno;
37835bcd655fSChristoph Hellwig change_curseg(sbi, type);
3784351df4b2SJaegeuk Kim }
3785351df4b2SJaegeuk Kim
3786491c0854SJaegeuk Kim curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
37872163a691SChristoph Hellwig curseg->sum_blk->entries[curseg->next_blkoff] = *sum;
3788351df4b2SJaegeuk Kim
3789c5d02785SChao Yu if (!recover_curseg || recover_newaddr) {
3790c5d02785SChao Yu if (!from_gc)
3791c5d02785SChao Yu update_segment_mtime(sbi, new_blkaddr, 0);
37926e2c64adSJaegeuk Kim update_sit_entry(sbi, new_blkaddr, 1);
3793c5d02785SChao Yu }
37946aa58d8aSChao Yu if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
3795cfd217f6SChao Yu f2fs_invalidate_internal_cache(sbi, old_blkaddr);
3796c5d02785SChao Yu if (!from_gc)
3797c5d02785SChao Yu update_segment_mtime(sbi, old_blkaddr, 0);
37986e2c64adSJaegeuk Kim update_sit_entry(sbi, old_blkaddr, -1);
37996aa58d8aSChao Yu }
38006e2c64adSJaegeuk Kim
38016e2c64adSJaegeuk Kim locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
38026e2c64adSJaegeuk Kim locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
38036e2c64adSJaegeuk Kim
3804351df4b2SJaegeuk Kim locate_dirty_segment(sbi, old_cursegno);
3805351df4b2SJaegeuk Kim
380619f106bcSChao Yu if (recover_curseg) {
380719f106bcSChao Yu if (old_cursegno != curseg->segno) {
380819f106bcSChao Yu curseg->next_segno = old_cursegno;
38095bcd655fSChristoph Hellwig change_curseg(sbi, type);
381019f106bcSChao Yu }
381119f106bcSChao Yu curseg->next_blkoff = old_blkoff;
3812753a8ed0SWang Xiaojun curseg->alloc_type = old_alloc_type;
381319f106bcSChao Yu }
381419f106bcSChao Yu
38153d26fa6bSChao Yu up_write(&sit_i->sentry_lock);
3816351df4b2SJaegeuk Kim mutex_unlock(&curseg->curseg_mutex);
3817e4544b63STim Murray f2fs_up_write(&SM_I(sbi)->curseg_lock);
3818351df4b2SJaegeuk Kim }
3819351df4b2SJaegeuk Kim
3820528e3459SChao Yu void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3821528e3459SChao Yu block_t old_addr, block_t new_addr,
382228bc106bSChao Yu unsigned char version, bool recover_curseg,
382328bc106bSChao Yu bool recover_newaddr)
3824528e3459SChao Yu {
3825528e3459SChao Yu struct f2fs_summary sum;
3826528e3459SChao Yu
3827528e3459SChao Yu set_summary(&sum, dn->nid, dn->ofs_in_node, version);
3828528e3459SChao Yu
38294d57b86dSChao Yu f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
3830c5d02785SChao Yu recover_curseg, recover_newaddr, false);
3831528e3459SChao Yu
3832f28b3434SChao Yu f2fs_update_data_blkaddr(dn, new_addr);
3833528e3459SChao Yu }
3834528e3459SChao Yu
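/*
 * Before waiting for writeback on @page, flush any merged LFS bio and any
 * cached IPU bio that might still hold it; then either wait for writeback
 * to finish (@ordered) or just for the page to become stable.
 */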
383593dfe2acSJaegeuk Kim void f2fs_wait_on_page_writeback(struct page *page,
3836bae0ee7aSChao Yu enum page_type type, bool ordered, bool locked)
383793dfe2acSJaegeuk Kim {
383893dfe2acSJaegeuk Kim if (PageWriteback(page)) {
38394081363fSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_P_SB(page);
38404081363fSJaegeuk Kim
38410b20fcecSChao Yu /* submit cached LFS IO */
3842bab475c5SChao Yu f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
3843146949deSJinyoung CHOI /* submit cached IPU IO */
38440b20fcecSChao Yu f2fs_submit_merged_ipu_write(sbi, NULL, page);
3845bae0ee7aSChao Yu if (ordered) {
384693dfe2acSJaegeuk Kim wait_on_page_writeback(page);
3847bae0ee7aSChao Yu f2fs_bug_on(sbi, locked && PageWriteback(page));
3848bae0ee7aSChao Yu } else {
3849fec1d657SJaegeuk Kim wait_for_stable_page(page);
385093dfe2acSJaegeuk Kim }
385193dfe2acSJaegeuk Kim }
3852bae0ee7aSChao Yu }
385393dfe2acSJaegeuk Kim
38540ded69f6SJaegeuk Kim void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
385508b39fbdSChao Yu {
38560ded69f6SJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
385708b39fbdSChao Yu struct page *cpage;
385808b39fbdSChao Yu
3859271fda62SSunmin Jeong if (!f2fs_meta_inode_gc_required(inode))
38600ded69f6SJaegeuk Kim return;
38610ded69f6SJaegeuk Kim
386293770ab7SChao Yu if (!__is_valid_data_blkaddr(blkaddr))
386308b39fbdSChao Yu return;
386408b39fbdSChao Yu
386508b39fbdSChao Yu cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
386608b39fbdSChao Yu if (cpage) {
3867bae0ee7aSChao Yu f2fs_wait_on_page_writeback(cpage, DATA, true, true);
386808b39fbdSChao Yu f2fs_put_page(cpage, 1);
386908b39fbdSChao Yu }
387008b39fbdSChao Yu }
387108b39fbdSChao Yu
38721e78e8bdSSahitya Tummala void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
38731e78e8bdSSahitya Tummala block_t len)
38741e78e8bdSSahitya Tummala {
38750d5b9d81SChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
38761e78e8bdSSahitya Tummala block_t i;
38771e78e8bdSSahitya Tummala
3878271fda62SSunmin Jeong if (!f2fs_meta_inode_gc_required(inode))
38790d5b9d81SChao Yu return;
38800d5b9d81SChao Yu
38811e78e8bdSSahitya Tummala for (i = 0; i < len; i++)
38821e78e8bdSSahitya Tummala f2fs_wait_on_block_writeback(inode, blkaddr + i);
38830d5b9d81SChao Yu
3884c92f2927SChao Yu f2fs_truncate_meta_inode_pages(sbi, blkaddr, len);
38851e78e8bdSSahitya Tummala }
38861e78e8bdSSahitya Tummala
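/*
 * Parse the compacted summary area written at the last checkpoint: the NAT
 * journal, the SIT journal, and then the per-log summary entries of the
 * three data logs, walking across meta pages as the entries overflow each
 * block.
 */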
38877735730dSChao Yu static int read_compacted_summaries(struct f2fs_sb_info *sbi)
3888351df4b2SJaegeuk Kim {
3889351df4b2SJaegeuk Kim struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3890351df4b2SJaegeuk Kim struct curseg_info *seg_i;
3891351df4b2SJaegeuk Kim unsigned char *kaddr;
3892351df4b2SJaegeuk Kim struct page *page;
3893351df4b2SJaegeuk Kim block_t start;
3894351df4b2SJaegeuk Kim int i, j, offset;
3895351df4b2SJaegeuk Kim
3896351df4b2SJaegeuk Kim start = start_sum_block(sbi);
3897351df4b2SJaegeuk Kim
38984d57b86dSChao Yu page = f2fs_get_meta_page(sbi, start++);
38997735730dSChao Yu if (IS_ERR(page))
39007735730dSChao Yu return PTR_ERR(page);
3901351df4b2SJaegeuk Kim kaddr = (unsigned char *)page_address(page);
3902351df4b2SJaegeuk Kim
3903351df4b2SJaegeuk Kim /* Step 1: restore nat cache */
3904351df4b2SJaegeuk Kim seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3905b7ad7512SChao Yu memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
3906351df4b2SJaegeuk Kim
3907351df4b2SJaegeuk Kim /* Step 2: restore sit cache */
3908351df4b2SJaegeuk Kim seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3909b7ad7512SChao Yu memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
3910351df4b2SJaegeuk Kim offset = 2 * SUM_JOURNAL_SIZE;
3911351df4b2SJaegeuk Kim
3912351df4b2SJaegeuk Kim /* Step 3: restore summary entries */
3913351df4b2SJaegeuk Kim for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3914351df4b2SJaegeuk Kim unsigned short blk_off;
3915351df4b2SJaegeuk Kim unsigned int segno;
3916351df4b2SJaegeuk Kim
3917351df4b2SJaegeuk Kim seg_i = CURSEG_I(sbi, i);
3918351df4b2SJaegeuk Kim segno = le32_to_cpu(ckpt->cur_data_segno[i]);
3919351df4b2SJaegeuk Kim blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
3920351df4b2SJaegeuk Kim seg_i->next_segno = segno;
3921351df4b2SJaegeuk Kim reset_curseg(sbi, i, 0);
3922351df4b2SJaegeuk Kim seg_i->alloc_type = ckpt->alloc_type[i];
3923351df4b2SJaegeuk Kim seg_i->next_blkoff = blk_off;
3924351df4b2SJaegeuk Kim
3925351df4b2SJaegeuk Kim if (seg_i->alloc_type == SSR)
3926f0248ba6SJaegeuk Kim blk_off = BLKS_PER_SEG(sbi);
3927351df4b2SJaegeuk Kim
3928351df4b2SJaegeuk Kim for (j = 0; j < blk_off; j++) {
3929351df4b2SJaegeuk Kim struct f2fs_summary *s;
39305f029c04SYi Zhuang
3931351df4b2SJaegeuk Kim s = (struct f2fs_summary *)(kaddr + offset);
3932351df4b2SJaegeuk Kim seg_i->sum_blk->entries[j] = *s;
3933351df4b2SJaegeuk Kim offset += SUMMARY_SIZE;
393409cbfeafSKirill A. Shutemov if (offset + SUMMARY_SIZE <= PAGE_SIZE -
3935351df4b2SJaegeuk Kim SUM_FOOTER_SIZE)
3936351df4b2SJaegeuk Kim continue;
3937351df4b2SJaegeuk Kim
3938351df4b2SJaegeuk Kim f2fs_put_page(page, 1);
3939351df4b2SJaegeuk Kim page = NULL;
3940351df4b2SJaegeuk Kim
39414d57b86dSChao Yu page = f2fs_get_meta_page(sbi, start++);
39427735730dSChao Yu if (IS_ERR(page))
39437735730dSChao Yu return PTR_ERR(page);
3944351df4b2SJaegeuk Kim kaddr = (unsigned char *)page_address(page);
3945351df4b2SJaegeuk Kim offset = 0;
3946351df4b2SJaegeuk Kim }
3947351df4b2SJaegeuk Kim }
3948351df4b2SJaegeuk Kim f2fs_put_page(page, 1);
39497735730dSChao Yu return 0;
3950351df4b2SJaegeuk Kim }
3951351df4b2SJaegeuk Kim
3952351df4b2SJaegeuk Kim static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
3953351df4b2SJaegeuk Kim {
3954351df4b2SJaegeuk Kim struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3955351df4b2SJaegeuk Kim struct f2fs_summary_block *sum;
3956351df4b2SJaegeuk Kim struct curseg_info *curseg;
3957351df4b2SJaegeuk Kim struct page *new;
3958351df4b2SJaegeuk Kim unsigned short blk_off;
3959351df4b2SJaegeuk Kim unsigned int segno = 0;
3960351df4b2SJaegeuk Kim block_t blk_addr = 0;
39617735730dSChao Yu int err = 0;
3962351df4b2SJaegeuk Kim
3963351df4b2SJaegeuk Kim /* get segment number and block addr */
3964351df4b2SJaegeuk Kim if (IS_DATASEG(type)) {
3965351df4b2SJaegeuk Kim segno = le32_to_cpu(ckpt->cur_data_segno[type]);
3966351df4b2SJaegeuk Kim blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
3967351df4b2SJaegeuk Kim CURSEG_HOT_DATA]);
3968119ee914SJaegeuk Kim if (__exist_node_summaries(sbi))
3969d0b9e42aSChao Yu blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type);
3970351df4b2SJaegeuk Kim else
3971351df4b2SJaegeuk Kim blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
3972351df4b2SJaegeuk Kim } else {
3973351df4b2SJaegeuk Kim segno = le32_to_cpu(ckpt->cur_node_segno[type -
3974351df4b2SJaegeuk Kim CURSEG_HOT_NODE]);
3975351df4b2SJaegeuk Kim blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
3976351df4b2SJaegeuk Kim CURSEG_HOT_NODE]);
3977119ee914SJaegeuk Kim if (__exist_node_summaries(sbi))
3978351df4b2SJaegeuk Kim blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
3979351df4b2SJaegeuk Kim type - CURSEG_HOT_NODE);
3980351df4b2SJaegeuk Kim else
3981351df4b2SJaegeuk Kim blk_addr = GET_SUM_BLOCK(sbi, segno);
3982351df4b2SJaegeuk Kim }
3983351df4b2SJaegeuk Kim
39844d57b86dSChao Yu new = f2fs_get_meta_page(sbi, blk_addr);
39857735730dSChao Yu if (IS_ERR(new))
39867735730dSChao Yu return PTR_ERR(new);
3987351df4b2SJaegeuk Kim sum = (struct f2fs_summary_block *)page_address(new);
3988351df4b2SJaegeuk Kim
3989351df4b2SJaegeuk Kim if (IS_NODESEG(type)) {
3990119ee914SJaegeuk Kim if (__exist_node_summaries(sbi)) {
3991351df4b2SJaegeuk Kim struct f2fs_summary *ns = &sum->entries[0];
3992351df4b2SJaegeuk Kim int i;
39935f029c04SYi Zhuang
3994f0248ba6SJaegeuk Kim for (i = 0; i < BLKS_PER_SEG(sbi); i++, ns++) {
3995351df4b2SJaegeuk Kim ns->version = 0;
3996351df4b2SJaegeuk Kim ns->ofs_in_node = 0;
3997351df4b2SJaegeuk Kim }
3998351df4b2SJaegeuk Kim } else {
39997735730dSChao Yu err = f2fs_restore_node_summary(sbi, segno, sum);
40007735730dSChao Yu if (err)
40017735730dSChao Yu goto out;
4002351df4b2SJaegeuk Kim }
4003351df4b2SJaegeuk Kim }
4004351df4b2SJaegeuk Kim
4005351df4b2SJaegeuk Kim /* set uncompleted segment to curseg */
4006351df4b2SJaegeuk Kim curseg = CURSEG_I(sbi, type);
4007351df4b2SJaegeuk Kim mutex_lock(&curseg->curseg_mutex);
4008b7ad7512SChao Yu
4009b7ad7512SChao Yu /* update journal info */
4010b7ad7512SChao Yu down_write(&curseg->journal_rwsem);
4011b7ad7512SChao Yu memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
4012b7ad7512SChao Yu up_write(&curseg->journal_rwsem);
4013b7ad7512SChao Yu
4014b7ad7512SChao Yu memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
4015b7ad7512SChao Yu memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
4016351df4b2SJaegeuk Kim curseg->next_segno = segno;
4017351df4b2SJaegeuk Kim reset_curseg(sbi, type, 0);
4018351df4b2SJaegeuk Kim curseg->alloc_type = ckpt->alloc_type[type];
4019351df4b2SJaegeuk Kim curseg->next_blkoff = blk_off;
4020351df4b2SJaegeuk Kim mutex_unlock(&curseg->curseg_mutex);
40217735730dSChao Yu out:
4022351df4b2SJaegeuk Kim f2fs_put_page(new, 1);
40237735730dSChao Yu return err;
4024351df4b2SJaegeuk Kim }
4025351df4b2SJaegeuk Kim
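/*
 * Restore every current segment from the checkpoint: compacted data
 * summaries first when CP_COMPACT_SUM_FLAG is set, then the normal per-log
 * summary blocks, and finally sanity-check that the NAT/SIT journals do not
 * carry more entries than they can hold.
 */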
4026351df4b2SJaegeuk Kim static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
4027351df4b2SJaegeuk Kim {
402821d3f8e1SJin Qian struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
402921d3f8e1SJin Qian struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
4030351df4b2SJaegeuk Kim int type = CURSEG_HOT_DATA;
4031e4fc5fbfSChao Yu int err;
4032351df4b2SJaegeuk Kim
4033aaec2b1dSChao Yu if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
40344d57b86dSChao Yu int npages = f2fs_npages_for_summary_flush(sbi, true);
40353fa06d7bSChao Yu
40363fa06d7bSChao Yu if (npages >= 2)
40374d57b86dSChao Yu f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
403826879fb1SChao Yu META_CP, true);
40393fa06d7bSChao Yu
4040351df4b2SJaegeuk Kim /* restore for compacted data summary */
40417735730dSChao Yu err = read_compacted_summaries(sbi);
40427735730dSChao Yu if (err)
40437735730dSChao Yu return err;
4044351df4b2SJaegeuk Kim type = CURSEG_HOT_NODE;
4045351df4b2SJaegeuk Kim }
4046351df4b2SJaegeuk Kim
4047119ee914SJaegeuk Kim if (__exist_node_summaries(sbi))
4048d0b9e42aSChao Yu f2fs_ra_meta_pages(sbi,
4049d0b9e42aSChao Yu sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type),
4050d0b9e42aSChao Yu NR_CURSEG_PERSIST_TYPE - type, META_CP, true);
40513fa06d7bSChao Yu
4052e4fc5fbfSChao Yu for (; type <= CURSEG_COLD_NODE; type++) {
4053e4fc5fbfSChao Yu err = read_normal_summaries(sbi, type);
4054e4fc5fbfSChao Yu if (err)
4055e4fc5fbfSChao Yu return err;
4056e4fc5fbfSChao Yu }
4057e4fc5fbfSChao Yu
405821d3f8e1SJin Qian /* sanity check for summary blocks */
405921d3f8e1SJin Qian if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
40609227d522SSahitya Tummala sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
4061833dcd35SJoe Perches f2fs_err(sbi, "invalid journal entries nats %u sits %u",
40629227d522SSahitya Tummala nats_in_cursum(nat_j), sits_in_cursum(sit_j));
406321d3f8e1SJin Qian return -EINVAL;
40649227d522SSahitya Tummala }
406521d3f8e1SJin Qian
4066351df4b2SJaegeuk Kim return 0;
4067351df4b2SJaegeuk Kim }
4068351df4b2SJaegeuk Kim
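/*
 * Checkpoint-time counterpart of read_compacted_summaries(): pack the NAT
 * journal, the SIT journal and the valid summary entries of the three data
 * logs into consecutive meta pages starting at @blkaddr.
 */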
4069351df4b2SJaegeuk Kim static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
4070351df4b2SJaegeuk Kim {
4071351df4b2SJaegeuk Kim struct page *page;
4072351df4b2SJaegeuk Kim unsigned char *kaddr;
4073351df4b2SJaegeuk Kim struct f2fs_summary *summary;
4074351df4b2SJaegeuk Kim struct curseg_info *seg_i;
4075351df4b2SJaegeuk Kim int written_size = 0;
4076351df4b2SJaegeuk Kim int i, j;
4077351df4b2SJaegeuk Kim
40784d57b86dSChao Yu page = f2fs_grab_meta_page(sbi, blkaddr++);
4079351df4b2SJaegeuk Kim kaddr = (unsigned char *)page_address(page);
408081114baaSChao Yu memset(kaddr, 0, PAGE_SIZE);
4081351df4b2SJaegeuk Kim
4082351df4b2SJaegeuk Kim /* Step 1: write nat cache */
4083351df4b2SJaegeuk Kim seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
4084b7ad7512SChao Yu memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
4085351df4b2SJaegeuk Kim written_size += SUM_JOURNAL_SIZE;
4086351df4b2SJaegeuk Kim
4087351df4b2SJaegeuk Kim /* Step 2: write sit cache */
4088351df4b2SJaegeuk Kim seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
4089b7ad7512SChao Yu memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
4090351df4b2SJaegeuk Kim written_size += SUM_JOURNAL_SIZE;
4091351df4b2SJaegeuk Kim
4092351df4b2SJaegeuk Kim /* Step 3: write summary entries */
4093351df4b2SJaegeuk Kim for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
4094351df4b2SJaegeuk Kim seg_i = CURSEG_I(sbi, i);
40956392e9ffSChristoph Hellwig for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) {
4096351df4b2SJaegeuk Kim if (!page) {
40974d57b86dSChao Yu page = f2fs_grab_meta_page(sbi, blkaddr++);
4098351df4b2SJaegeuk Kim kaddr = (unsigned char *)page_address(page);
409981114baaSChao Yu memset(kaddr, 0, PAGE_SIZE);
4100351df4b2SJaegeuk Kim written_size = 0;
4101351df4b2SJaegeuk Kim }
4102351df4b2SJaegeuk Kim summary = (struct f2fs_summary *)(kaddr + written_size);
4103351df4b2SJaegeuk Kim *summary = seg_i->sum_blk->entries[j];
4104351df4b2SJaegeuk Kim written_size += SUMMARY_SIZE;
4105351df4b2SJaegeuk Kim
410609cbfeafSKirill A. Shutemov if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
4107351df4b2SJaegeuk Kim SUM_FOOTER_SIZE)
4108351df4b2SJaegeuk Kim continue;
4109351df4b2SJaegeuk Kim
4110e8d61a74SChao Yu set_page_dirty(page);
4111351df4b2SJaegeuk Kim f2fs_put_page(page, 1);
4112351df4b2SJaegeuk Kim page = NULL;
4113351df4b2SJaegeuk Kim }
4114351df4b2SJaegeuk Kim }
4115e8d61a74SChao Yu if (page) {
4116e8d61a74SChao Yu set_page_dirty(page);
4117351df4b2SJaegeuk Kim f2fs_put_page(page, 1);
4118351df4b2SJaegeuk Kim }
4119e8d61a74SChao Yu }
4120351df4b2SJaegeuk Kim
4121351df4b2SJaegeuk Kim static void write_normal_summaries(struct f2fs_sb_info *sbi,
4122351df4b2SJaegeuk Kim block_t blkaddr, int type)
4123351df4b2SJaegeuk Kim {
4124351df4b2SJaegeuk Kim int i, end;
41255f029c04SYi Zhuang
4126351df4b2SJaegeuk Kim if (IS_DATASEG(type))
4127351df4b2SJaegeuk Kim end = type + NR_CURSEG_DATA_TYPE;
4128351df4b2SJaegeuk Kim else
4129351df4b2SJaegeuk Kim end = type + NR_CURSEG_NODE_TYPE;
4130351df4b2SJaegeuk Kim
4131b7ad7512SChao Yu for (i = type; i < end; i++)
4132b7ad7512SChao Yu write_current_sum_page(sbi, i, blkaddr + (i - type));
4133351df4b2SJaegeuk Kim }
4134351df4b2SJaegeuk Kim
41354d57b86dSChao Yu void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4136351df4b2SJaegeuk Kim {
4137aaec2b1dSChao Yu if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
4138351df4b2SJaegeuk Kim write_compacted_summaries(sbi, start_blk);
4139351df4b2SJaegeuk Kim else
4140351df4b2SJaegeuk Kim write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
4141351df4b2SJaegeuk Kim }
4142351df4b2SJaegeuk Kim
41434d57b86dSChao Yu void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4144351df4b2SJaegeuk Kim {
4145351df4b2SJaegeuk Kim write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
4146351df4b2SJaegeuk Kim }
4147351df4b2SJaegeuk Kim
41484d57b86dSChao Yu int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
4149351df4b2SJaegeuk Kim unsigned int val, int alloc)
4150351df4b2SJaegeuk Kim {
4151351df4b2SJaegeuk Kim int i;
4152351df4b2SJaegeuk Kim
4153351df4b2SJaegeuk Kim if (type == NAT_JOURNAL) {
4154dfc08a12SChao Yu for (i = 0; i < nats_in_cursum(journal); i++) {
4155dfc08a12SChao Yu if (le32_to_cpu(nid_in_journal(journal, i)) == val)
4156351df4b2SJaegeuk Kim return i;
4157351df4b2SJaegeuk Kim }
4158dfc08a12SChao Yu if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
4159dfc08a12SChao Yu return update_nats_in_cursum(journal, 1);
4160351df4b2SJaegeuk Kim } else if (type == SIT_JOURNAL) {
4161dfc08a12SChao Yu for (i = 0; i < sits_in_cursum(journal); i++)
4162dfc08a12SChao Yu if (le32_to_cpu(segno_in_journal(journal, i)) == val)
4163351df4b2SJaegeuk Kim return i;
4164dfc08a12SChao Yu if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
4165dfc08a12SChao Yu return update_sits_in_cursum(journal, 1);
4166351df4b2SJaegeuk Kim }
4167351df4b2SJaegeuk Kim return -1;
4168351df4b2SJaegeuk Kim }
4169351df4b2SJaegeuk Kim
4170351df4b2SJaegeuk Kim static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
4171351df4b2SJaegeuk Kim unsigned int segno)
4172351df4b2SJaegeuk Kim {
417386f33603SJaegeuk Kim return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
4174351df4b2SJaegeuk Kim }
4175351df4b2SJaegeuk Kim
4176351df4b2SJaegeuk Kim static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
4177351df4b2SJaegeuk Kim unsigned int start)
4178351df4b2SJaegeuk Kim {
4179351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi);
4180068c3cd8SYunlei He struct page *page;
4181351df4b2SJaegeuk Kim pgoff_t src_off, dst_off;
4182351df4b2SJaegeuk Kim
4183351df4b2SJaegeuk Kim src_off = current_sit_addr(sbi, start);
4184351df4b2SJaegeuk Kim dst_off = next_sit_addr(sbi, src_off);
4185351df4b2SJaegeuk Kim
41864d57b86dSChao Yu page = f2fs_grab_meta_page(sbi, dst_off);
4187068c3cd8SYunlei He seg_info_to_sit_page(sbi, page, start);
4188351df4b2SJaegeuk Kim
4189068c3cd8SYunlei He set_page_dirty(page);
4190351df4b2SJaegeuk Kim set_to_next_sit(sit_i, start);
4191351df4b2SJaegeuk Kim
4192068c3cd8SYunlei He return page;
4193351df4b2SJaegeuk Kim }
4194351df4b2SJaegeuk Kim
4195184a5cd2SChao Yu static struct sit_entry_set *grab_sit_entry_set(void)
4196184a5cd2SChao Yu {
4197184a5cd2SChao Yu struct sit_entry_set *ses =
419832410577SChao Yu f2fs_kmem_cache_alloc(sit_entry_set_slab,
419932410577SChao Yu GFP_NOFS, true, NULL);
4200184a5cd2SChao Yu
4201184a5cd2SChao Yu ses->entry_cnt = 0;
4202184a5cd2SChao Yu INIT_LIST_HEAD(&ses->set_list);
4203184a5cd2SChao Yu return ses;
4204184a5cd2SChao Yu }
4205184a5cd2SChao Yu
4206184a5cd2SChao Yu static void release_sit_entry_set(struct sit_entry_set *ses)
4207184a5cd2SChao Yu {
4208184a5cd2SChao Yu list_del(&ses->set_list);
4209184a5cd2SChao Yu kmem_cache_free(sit_entry_set_slab, ses);
4210184a5cd2SChao Yu }
4211184a5cd2SChao Yu
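/*
 * After ses->entry_cnt has grown, move @ses towards the tail so that the
 * set list stays sorted by ascending entry_cnt.
 */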
4212184a5cd2SChao Yu static void adjust_sit_entry_set(struct sit_entry_set *ses,
4213184a5cd2SChao Yu struct list_head *head)
4214184a5cd2SChao Yu {
4215184a5cd2SChao Yu struct sit_entry_set *next = ses;
4216184a5cd2SChao Yu
4217184a5cd2SChao Yu if (list_is_last(&ses->set_list, head))
4218184a5cd2SChao Yu return;
4219184a5cd2SChao Yu
4220184a5cd2SChao Yu list_for_each_entry_continue(next, head, set_list)
4221df35435dSJakob Koschel if (ses->entry_cnt <= next->entry_cnt) {
4222184a5cd2SChao Yu list_move_tail(&ses->set_list, &next->set_list);
4223df35435dSJakob Koschel return;
4224df35435dSJakob Koschel }
4225df35435dSJakob Koschel
4226df35435dSJakob Koschel list_move_tail(&ses->set_list, head);
4227184a5cd2SChao Yu }
4228184a5cd2SChao Yu
4229184a5cd2SChao Yu static void add_sit_entry(unsigned int segno, struct list_head *head)
4230184a5cd2SChao Yu {
4231184a5cd2SChao Yu struct sit_entry_set *ses;
4232184a5cd2SChao Yu unsigned int start_segno = START_SEGNO(segno);
4233184a5cd2SChao Yu
4234184a5cd2SChao Yu list_for_each_entry(ses, head, set_list) {
4235184a5cd2SChao Yu if (ses->start_segno == start_segno) {
4236184a5cd2SChao Yu ses->entry_cnt++;
4237184a5cd2SChao Yu adjust_sit_entry_set(ses, head);
4238184a5cd2SChao Yu return;
4239184a5cd2SChao Yu }
4240184a5cd2SChao Yu }
4241184a5cd2SChao Yu
4242184a5cd2SChao Yu ses = grab_sit_entry_set();
4243184a5cd2SChao Yu
4244184a5cd2SChao Yu ses->start_segno = start_segno;
4245184a5cd2SChao Yu ses->entry_cnt++;
4246184a5cd2SChao Yu list_add(&ses->set_list, head);
4247184a5cd2SChao Yu }
4248184a5cd2SChao Yu
4249184a5cd2SChao Yu static void add_sits_in_set(struct f2fs_sb_info *sbi)
4250184a5cd2SChao Yu {
4251184a5cd2SChao Yu struct f2fs_sm_info *sm_info = SM_I(sbi);
4252184a5cd2SChao Yu struct list_head *set_list = &sm_info->sit_entry_set;
4253184a5cd2SChao Yu unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
4254184a5cd2SChao Yu unsigned int segno;
4255184a5cd2SChao Yu
42567cd8558bSJaegeuk Kim for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
4257184a5cd2SChao Yu add_sit_entry(segno, set_list);
4258184a5cd2SChao Yu }
4259184a5cd2SChao Yu
4260184a5cd2SChao Yu static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
4261351df4b2SJaegeuk Kim {
4262351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4263b7ad7512SChao Yu struct f2fs_journal *journal = curseg->journal;
4264351df4b2SJaegeuk Kim int i;
4265351df4b2SJaegeuk Kim
4266b7ad7512SChao Yu down_write(&curseg->journal_rwsem);
4267dfc08a12SChao Yu for (i = 0; i < sits_in_cursum(journal); i++) {
4268351df4b2SJaegeuk Kim unsigned int segno;
4269184a5cd2SChao Yu bool dirtied;
4270184a5cd2SChao Yu
4271dfc08a12SChao Yu segno = le32_to_cpu(segno_in_journal(journal, i));
4272184a5cd2SChao Yu dirtied = __mark_sit_entry_dirty(sbi, segno);
4273184a5cd2SChao Yu
4274184a5cd2SChao Yu if (!dirtied)
4275184a5cd2SChao Yu add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
4276351df4b2SJaegeuk Kim }
4277dfc08a12SChao Yu update_sits_in_cursum(journal, -i);
4278b7ad7512SChao Yu up_write(&curseg->journal_rwsem);
4279351df4b2SJaegeuk Kim }
4280351df4b2SJaegeuk Kim
42810a8165d7SJaegeuk Kim /*
4282351df4b2SJaegeuk Kim * CP calls this function, which flushes SIT entries including sit_journal,
4283351df4b2SJaegeuk Kim * and moves prefree segs to free segs.
4284351df4b2SJaegeuk Kim */
42854d57b86dSChao Yu void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
4286351df4b2SJaegeuk Kim {
4287351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi);
4288351df4b2SJaegeuk Kim unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
4289351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4290b7ad7512SChao Yu struct f2fs_journal *journal = curseg->journal;
4291184a5cd2SChao Yu struct sit_entry_set *ses, *tmp;
4292184a5cd2SChao Yu struct list_head *head = &SM_I(sbi)->sit_entry_set;
429304f0b2eaSQiuyang Sun bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
42944b2fecc8SJaegeuk Kim struct seg_entry *se;
4295351df4b2SJaegeuk Kim
42963d26fa6bSChao Yu down_write(&sit_i->sentry_lock);
4297351df4b2SJaegeuk Kim
42982b11a74bSWanpeng Li if (!sit_i->dirty_sentries)
42992b11a74bSWanpeng Li goto out;
43002b11a74bSWanpeng Li
4301351df4b2SJaegeuk Kim /*
4302184a5cd2SChao Yu * temporarily add and account the sit entries marked in the dirty
4303184a5cd2SChao Yu * bitmap in the sit entry set
4304351df4b2SJaegeuk Kim */
4305184a5cd2SChao Yu add_sits_in_set(sbi);
4306351df4b2SJaegeuk Kim
4307184a5cd2SChao Yu /*
4308184a5cd2SChao Yu * if there is not enough space in the journal to store the dirty sit
4309184a5cd2SChao Yu * entries, remove all entries from the journal and add and account
4310184a5cd2SChao Yu * them in the sit entry set.
4311184a5cd2SChao Yu */
431204f0b2eaSQiuyang Sun if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
431304f0b2eaSQiuyang Sun !to_journal)
4314184a5cd2SChao Yu remove_sits_in_journal(sbi);
4315184a5cd2SChao Yu
4316184a5cd2SChao Yu /*
4317184a5cd2SChao Yu * there are two steps to flush sit entries:
4318184a5cd2SChao Yu * #1, flush sit entries to journal in current cold data summary block.
4319184a5cd2SChao Yu * #2, flush sit entries to sit page.
4320184a5cd2SChao Yu */
4321184a5cd2SChao Yu list_for_each_entry_safe(ses, tmp, head, set_list) {
43224a257ed6SJaegeuk Kim struct page *page = NULL;
4323184a5cd2SChao Yu struct f2fs_sit_block *raw_sit = NULL;
4324184a5cd2SChao Yu unsigned int start_segno = ses->start_segno;
4325184a5cd2SChao Yu unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
43267cd8558bSJaegeuk Kim (unsigned long)MAIN_SEGS(sbi));
4327184a5cd2SChao Yu unsigned int segno = start_segno;
4328184a5cd2SChao Yu
4329184a5cd2SChao Yu if (to_journal &&
4330dfc08a12SChao Yu !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
4331184a5cd2SChao Yu to_journal = false;
4332184a5cd2SChao Yu
4333b7ad7512SChao Yu if (to_journal) {
4334b7ad7512SChao Yu down_write(&curseg->journal_rwsem);
4335b7ad7512SChao Yu } else {
4336184a5cd2SChao Yu page = get_next_sit_page(sbi, start_segno);
4337184a5cd2SChao Yu raw_sit = page_address(page);
4338184a5cd2SChao Yu }
4339184a5cd2SChao Yu
4340184a5cd2SChao Yu /* flush dirty sit entries in region of current sit set */
4341184a5cd2SChao Yu for_each_set_bit_from(segno, bitmap, end) {
4342184a5cd2SChao Yu int offset, sit_offset;
43434b2fecc8SJaegeuk Kim
43444b2fecc8SJaegeuk Kim se = get_seg_entry(sbi, segno);
434556b07e7eSZhikang Zhang #ifdef CONFIG_F2FS_CHECK_FS
434656b07e7eSZhikang Zhang if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
434756b07e7eSZhikang Zhang SIT_VBLOCK_MAP_SIZE))
434856b07e7eSZhikang Zhang f2fs_bug_on(sbi, 1);
434956b07e7eSZhikang Zhang #endif
4350351df4b2SJaegeuk Kim
4351b2955550SJaegeuk Kim /* add discard candidates */
4352c473f1a9SChao Yu if (!(cpc->reason & CP_DISCARD)) {
43534b2fecc8SJaegeuk Kim cpc->trim_start = segno;
435425290fa5SJaegeuk Kim add_discard_addrs(sbi, cpc, false);
43554b2fecc8SJaegeuk Kim }
4356b2955550SJaegeuk Kim
4357184a5cd2SChao Yu if (to_journal) {
43584d57b86dSChao Yu offset = f2fs_lookup_journal_in_cursum(journal,
4359184a5cd2SChao Yu SIT_JOURNAL, segno, 1);
4360184a5cd2SChao Yu f2fs_bug_on(sbi, offset < 0);
4361dfc08a12SChao Yu segno_in_journal(journal, offset) =
4362184a5cd2SChao Yu cpu_to_le32(segno);
4363184a5cd2SChao Yu seg_info_to_raw_sit(se,
4364dfc08a12SChao Yu &sit_in_journal(journal, offset));
436556b07e7eSZhikang Zhang check_block_count(sbi, segno,
436656b07e7eSZhikang Zhang &sit_in_journal(journal, offset));
4367184a5cd2SChao Yu } else {
4368184a5cd2SChao Yu sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
4369184a5cd2SChao Yu seg_info_to_raw_sit(se,
4370184a5cd2SChao Yu &raw_sit->entries[sit_offset]);
437156b07e7eSZhikang Zhang check_block_count(sbi, segno,
437256b07e7eSZhikang Zhang &raw_sit->entries[sit_offset]);
4373351df4b2SJaegeuk Kim }
4374351df4b2SJaegeuk Kim
4375351df4b2SJaegeuk Kim __clear_bit(segno, bitmap);
4376351df4b2SJaegeuk Kim sit_i->dirty_sentries--;
4377184a5cd2SChao Yu ses->entry_cnt--;
4378351df4b2SJaegeuk Kim }
4379184a5cd2SChao Yu
4380b7ad7512SChao Yu if (to_journal)
4381b7ad7512SChao Yu up_write(&curseg->journal_rwsem);
4382b7ad7512SChao Yu else
4383184a5cd2SChao Yu f2fs_put_page(page, 1);
4384184a5cd2SChao Yu
4385184a5cd2SChao Yu f2fs_bug_on(sbi, ses->entry_cnt);
4386184a5cd2SChao Yu release_sit_entry_set(ses);
4387184a5cd2SChao Yu }
4388184a5cd2SChao Yu
4389184a5cd2SChao Yu f2fs_bug_on(sbi, !list_empty(head));
4390184a5cd2SChao Yu f2fs_bug_on(sbi, sit_i->dirty_sentries);
4391184a5cd2SChao Yu out:
4392c473f1a9SChao Yu if (cpc->reason & CP_DISCARD) {
4393650d3c4eSYunlei He __u64 trim_start = cpc->trim_start;
4394650d3c4eSYunlei He
43954b2fecc8SJaegeuk Kim for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
439625290fa5SJaegeuk Kim add_discard_addrs(sbi, cpc, false);
4397650d3c4eSYunlei He
4398650d3c4eSYunlei He cpc->trim_start = trim_start;
43994b2fecc8SJaegeuk Kim }
44003d26fa6bSChao Yu up_write(&sit_i->sentry_lock);
4401351df4b2SJaegeuk Kim
4402351df4b2SJaegeuk Kim set_prefree_as_free_segments(sbi);
4403351df4b2SJaegeuk Kim }
4404351df4b2SJaegeuk Kim
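/*
 * Build the in-memory SIT at mount time: allocate the per-segment seg
 * entries, the dirty-sentries bitmap, one packed bitmap region holding the
 * current/checkpoint (and, with CONFIG_F2FS_CHECK_FS, mirror) valid maps
 * plus an optional discard map per segment, the per-section entries for
 * large sections, and a copy of the SIT bitmap from the checkpoint.
 */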
4405351df4b2SJaegeuk Kim static int build_sit_info(struct f2fs_sb_info *sbi)
4406351df4b2SJaegeuk Kim {
4407351df4b2SJaegeuk Kim struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4408351df4b2SJaegeuk Kim struct sit_info *sit_i;
4409351df4b2SJaegeuk Kim unsigned int sit_segs, start;
44102fde3dd1SChao Yu char *src_bitmap, *bitmap;
4411bbf9f7d9SSahitya Tummala unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
44124f993264SChao Yu unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0;
4413351df4b2SJaegeuk Kim
4414351df4b2SJaegeuk Kim /* allocate memory for SIT information */
4415acbf054dSChao Yu sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
4416351df4b2SJaegeuk Kim if (!sit_i)
4417351df4b2SJaegeuk Kim return -ENOMEM;
4418351df4b2SJaegeuk Kim
4419351df4b2SJaegeuk Kim SM_I(sbi)->sit_info = sit_i;
4420351df4b2SJaegeuk Kim
44219d2a789cSKees Cook sit_i->sentries =
44229d2a789cSKees Cook f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
44239d2a789cSKees Cook MAIN_SEGS(sbi)),
44249d2a789cSKees Cook GFP_KERNEL);
4425351df4b2SJaegeuk Kim if (!sit_i->sentries)
4426351df4b2SJaegeuk Kim return -ENOMEM;
4427351df4b2SJaegeuk Kim
4428bbf9f7d9SSahitya Tummala main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4429bbf9f7d9SSahitya Tummala sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
4430628b3d14SChao Yu GFP_KERNEL);
4431351df4b2SJaegeuk Kim if (!sit_i->dirty_sentries_bitmap)
4432351df4b2SJaegeuk Kim return -ENOMEM;
4433351df4b2SJaegeuk Kim
44342fde3dd1SChao Yu #ifdef CONFIG_F2FS_CHECK_FS
44354f993264SChao Yu bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (3 + discard_map);
44362fde3dd1SChao Yu #else
44374f993264SChao Yu bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map);
44382fde3dd1SChao Yu #endif
44392fde3dd1SChao Yu sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
44402fde3dd1SChao Yu if (!sit_i->bitmap)
44413e025740SJaegeuk Kim return -ENOMEM;
44423e025740SJaegeuk Kim
44432fde3dd1SChao Yu bitmap = sit_i->bitmap;
44442fde3dd1SChao Yu
44452fde3dd1SChao Yu for (start = 0; start < MAIN_SEGS(sbi); start++) {
44462fde3dd1SChao Yu sit_i->sentries[start].cur_valid_map = bitmap;
44472fde3dd1SChao Yu bitmap += SIT_VBLOCK_MAP_SIZE;
44482fde3dd1SChao Yu
44492fde3dd1SChao Yu sit_i->sentries[start].ckpt_valid_map = bitmap;
44502fde3dd1SChao Yu bitmap += SIT_VBLOCK_MAP_SIZE;
44512fde3dd1SChao Yu
4452355e7891SChao Yu #ifdef CONFIG_F2FS_CHECK_FS
44532fde3dd1SChao Yu sit_i->sentries[start].cur_valid_map_mir = bitmap;
44542fde3dd1SChao Yu bitmap += SIT_VBLOCK_MAP_SIZE;
4455355e7891SChao Yu #endif
4456355e7891SChao Yu
44574f993264SChao Yu if (discard_map) {
44582fde3dd1SChao Yu sit_i->sentries[start].discard_map = bitmap;
44592fde3dd1SChao Yu bitmap += SIT_VBLOCK_MAP_SIZE;
4460351df4b2SJaegeuk Kim }
44614f993264SChao Yu }
4462351df4b2SJaegeuk Kim
4463acbf054dSChao Yu sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
446460a3b782SJaegeuk Kim if (!sit_i->tmp_map)
446560a3b782SJaegeuk Kim return -ENOMEM;
446660a3b782SJaegeuk Kim
44672c70c5e3SChao Yu if (__is_large_section(sbi)) {
44689d2a789cSKees Cook sit_i->sec_entries =
44699d2a789cSKees Cook f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
44709d2a789cSKees Cook MAIN_SECS(sbi)),
44719d2a789cSKees Cook GFP_KERNEL);
4472351df4b2SJaegeuk Kim if (!sit_i->sec_entries)
4473351df4b2SJaegeuk Kim return -ENOMEM;
4474351df4b2SJaegeuk Kim }
4475351df4b2SJaegeuk Kim
4476351df4b2SJaegeuk Kim /* get information related with SIT */
4477351df4b2SJaegeuk Kim sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
4478351df4b2SJaegeuk Kim
4479351df4b2SJaegeuk Kim /* set up the SIT bitmap from the checkpoint pack */
4480bbf9f7d9SSahitya Tummala sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
4481351df4b2SJaegeuk Kim src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
4482351df4b2SJaegeuk Kim
4483bbf9f7d9SSahitya Tummala sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
4484ae27d62eSChao Yu if (!sit_i->sit_bitmap)
4485351df4b2SJaegeuk Kim return -ENOMEM;
4486351df4b2SJaegeuk Kim
4487ae27d62eSChao Yu #ifdef CONFIG_F2FS_CHECK_FS
4488bbf9f7d9SSahitya Tummala sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
4489bbf9f7d9SSahitya Tummala sit_bitmap_size, GFP_KERNEL);
4490ae27d62eSChao Yu if (!sit_i->sit_bitmap_mir)
4491ae27d62eSChao Yu return -ENOMEM;
4492bbf9f7d9SSahitya Tummala
4493bbf9f7d9SSahitya Tummala sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
4494bbf9f7d9SSahitya Tummala main_bitmap_size, GFP_KERNEL);
4495bbf9f7d9SSahitya Tummala if (!sit_i->invalid_segmap)
4496bbf9f7d9SSahitya Tummala return -ENOMEM;
4497ae27d62eSChao Yu #endif
4498ae27d62eSChao Yu
4499351df4b2SJaegeuk Kim sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
4500351df4b2SJaegeuk Kim sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
4501c79b7ff1SJaegeuk Kim sit_i->written_valid_blocks = 0;
4502bbf9f7d9SSahitya Tummala sit_i->bitmap_size = sit_bitmap_size;
4503351df4b2SJaegeuk Kim sit_i->dirty_sentries = 0;
4504351df4b2SJaegeuk Kim sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
4505351df4b2SJaegeuk Kim sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
4506a7e679b5SJaegeuk Kim sit_i->mounted_time = ktime_get_boottime_seconds();
45073d26fa6bSChao Yu init_rwsem(&sit_i->sentry_lock);
4508351df4b2SJaegeuk Kim return 0;
4509351df4b2SJaegeuk Kim }
4510351df4b2SJaegeuk Kim
4511351df4b2SJaegeuk Kim static int build_free_segmap(struct f2fs_sb_info *sbi)
4512351df4b2SJaegeuk Kim {
4513351df4b2SJaegeuk Kim struct free_segmap_info *free_i;
4514351df4b2SJaegeuk Kim unsigned int bitmap_size, sec_bitmap_size;
4515351df4b2SJaegeuk Kim
4516351df4b2SJaegeuk Kim /* allocate memory for free segmap information */
4517acbf054dSChao Yu free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
4518351df4b2SJaegeuk Kim if (!free_i)
4519351df4b2SJaegeuk Kim return -ENOMEM;
4520351df4b2SJaegeuk Kim
4521351df4b2SJaegeuk Kim SM_I(sbi)->free_info = free_i;
4522351df4b2SJaegeuk Kim
45237cd8558bSJaegeuk Kim bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4524628b3d14SChao Yu free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
4525351df4b2SJaegeuk Kim if (!free_i->free_segmap)
4526351df4b2SJaegeuk Kim return -ENOMEM;
4527351df4b2SJaegeuk Kim
45287cd8558bSJaegeuk Kim sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4529628b3d14SChao Yu free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
4530351df4b2SJaegeuk Kim if (!free_i->free_secmap)
4531351df4b2SJaegeuk Kim return -ENOMEM;
4532351df4b2SJaegeuk Kim
4533351df4b2SJaegeuk Kim /* set all segments as dirty temporarily */
4534351df4b2SJaegeuk Kim memset(free_i->free_segmap, 0xff, bitmap_size);
4535351df4b2SJaegeuk Kim memset(free_i->free_secmap, 0xff, sec_bitmap_size);
4536351df4b2SJaegeuk Kim
4537351df4b2SJaegeuk Kim /* init free segmap information */
45387cd8558bSJaegeuk Kim free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
4539351df4b2SJaegeuk Kim free_i->free_segments = 0;
4540351df4b2SJaegeuk Kim free_i->free_sections = 0;
45411a118ccfSChao Yu spin_lock_init(&free_i->segmap_lock);
4542351df4b2SJaegeuk Kim return 0;
4543351df4b2SJaegeuk Kim }
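/*
 * build_free_segmap() only allocates the two bitmaps and fills them with
 * 0xff, i.e. every segment and section starts out marked as in use. The
 * real free state is not known until the SIT entries have been loaded;
 * init_free_segmap() below walks the seg entries and clears the bits of
 * segments that have no valid blocks (via __set_free), which is why the
 * comment above calls this state temporary.
 */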
4544351df4b2SJaegeuk Kim
build_curseg(struct f2fs_sb_info * sbi)4545351df4b2SJaegeuk Kim static int build_curseg(struct f2fs_sb_info *sbi)
4546351df4b2SJaegeuk Kim {
45471042d60fSNamjae Jeon struct curseg_info *array;
4548351df4b2SJaegeuk Kim int i;
4549351df4b2SJaegeuk Kim
4550d0b9e42aSChao Yu array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
4551d0b9e42aSChao Yu sizeof(*array)), GFP_KERNEL);
4552351df4b2SJaegeuk Kim if (!array)
4553351df4b2SJaegeuk Kim return -ENOMEM;
4554351df4b2SJaegeuk Kim
4555351df4b2SJaegeuk Kim SM_I(sbi)->curseg_array = array;
4556351df4b2SJaegeuk Kim
4557d0b9e42aSChao Yu for (i = 0; i < NO_CHECK_TYPE; i++) {
4558351df4b2SJaegeuk Kim mutex_init(&array[i].curseg_mutex);
4559acbf054dSChao Yu array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
4560351df4b2SJaegeuk Kim if (!array[i].sum_blk)
4561351df4b2SJaegeuk Kim return -ENOMEM;
4562b7ad7512SChao Yu init_rwsem(&array[i].journal_rwsem);
4563acbf054dSChao Yu array[i].journal = f2fs_kzalloc(sbi,
4564acbf054dSChao Yu sizeof(struct f2fs_journal), GFP_KERNEL);
4565b7ad7512SChao Yu if (!array[i].journal)
4566b7ad7512SChao Yu return -ENOMEM;
4567d0b9e42aSChao Yu if (i < NR_PERSISTENT_LOG)
4568d0b9e42aSChao Yu array[i].seg_type = CURSEG_HOT_DATA + i;
4569d0b9e42aSChao Yu else if (i == CURSEG_COLD_DATA_PINNED)
4570d0b9e42aSChao Yu array[i].seg_type = CURSEG_COLD_DATA;
4571093749e2SChao Yu else if (i == CURSEG_ALL_DATA_ATGC)
4572093749e2SChao Yu array[i].seg_type = CURSEG_COLD_DATA;
4573351df4b2SJaegeuk Kim array[i].segno = NULL_SEGNO;
4574351df4b2SJaegeuk Kim array[i].next_blkoff = 0;
4575d0b9e42aSChao Yu array[i].inited = false;
4576351df4b2SJaegeuk Kim }
4577351df4b2SJaegeuk Kim return restore_curseg_summaries(sbi);
4578351df4b2SJaegeuk Kim }
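/*
 * Layout of the curseg array built above: the first NR_PERSISTENT_LOG
 * entries are the on-disk logs and get seg_type CURSEG_HOT_DATA + i,
 * i.e. the hot/warm/cold data and node logs. The remaining entries,
 * CURSEG_COLD_DATA_PINNED and CURSEG_ALL_DATA_ATGC, are extra logs that
 * reuse CURSEG_COLD_DATA as their seg_type. Every entry starts out with
 * segno == NULL_SEGNO and inited == false until it is first used.
 */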
4579351df4b2SJaegeuk Kim
build_sit_entries(struct f2fs_sb_info * sbi)4580c39a1b34SJaegeuk Kim static int build_sit_entries(struct f2fs_sb_info *sbi)
4581351df4b2SJaegeuk Kim {
4582351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi);
4583351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4584b7ad7512SChao Yu struct f2fs_journal *journal = curseg->journal;
45859c094040SYunlei He struct seg_entry *se;
45869c094040SYunlei He struct f2fs_sit_entry sit;
458774de593aSChao Yu int sit_blk_cnt = SIT_BLK_CNT(sbi);
458874de593aSChao Yu unsigned int i, start, end;
458974de593aSChao Yu unsigned int readed, start_blk = 0;
4590c39a1b34SJaegeuk Kim int err = 0;
45916b8beca0SChao Yu block_t sit_valid_blocks[2] = {0, 0};
4592351df4b2SJaegeuk Kim
459374de593aSChao Yu do {
4594a8affc03SChristoph Hellwig readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
4595664ba972SJaegeuk Kim META_SIT, true);
459674de593aSChao Yu
459774de593aSChao Yu start = start_blk * sit_i->sents_per_block;
459874de593aSChao Yu end = (start_blk + readed) * sit_i->sents_per_block;
459974de593aSChao Yu
46007cd8558bSJaegeuk Kim for (; start < end && start < MAIN_SEGS(sbi); start++) {
4601351df4b2SJaegeuk Kim struct f2fs_sit_block *sit_blk;
4602351df4b2SJaegeuk Kim struct page *page;
4603351df4b2SJaegeuk Kim
46049c094040SYunlei He se = &sit_i->sentries[start];
4605351df4b2SJaegeuk Kim page = get_current_sit_page(sbi, start);
4606edc55aafSJaegeuk Kim if (IS_ERR(page))
4607edc55aafSJaegeuk Kim return PTR_ERR(page);
4608351df4b2SJaegeuk Kim sit_blk = (struct f2fs_sit_block *)page_address(page);
4609351df4b2SJaegeuk Kim sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
4610351df4b2SJaegeuk Kim f2fs_put_page(page, 1);
4611d600af23SChao Yu
4612c39a1b34SJaegeuk Kim err = check_block_count(sbi, start, &sit);
4613c39a1b34SJaegeuk Kim if (err)
4614c39a1b34SJaegeuk Kim return err;
4615351df4b2SJaegeuk Kim seg_info_from_raw_sit(se, &sit);
46166b8beca0SChao Yu
461709beadf2SChao Yu if (se->type >= NR_PERSISTENT_LOG) {
461809beadf2SChao Yu f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
461909beadf2SChao Yu se->type, start);
462095fa90c9SChao Yu f2fs_handle_error(sbi,
462195fa90c9SChao Yu ERROR_INCONSISTENT_SUM_TYPE);
462209beadf2SChao Yu return -EFSCORRUPTED;
462309beadf2SChao Yu }
462409beadf2SChao Yu
46256b8beca0SChao Yu sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
4626a66cdd98SJaegeuk Kim
4627f0248ba6SJaegeuk Kim if (!f2fs_block_unit_discard(sbi))
4628f0248ba6SJaegeuk Kim goto init_discard_map_done;
4629f0248ba6SJaegeuk Kim
4630a66cdd98SJaegeuk Kim /* build the discard map only once */
46311f43e2adSChao Yu if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
46321f43e2adSChao Yu memset(se->discard_map, 0xff,
46333e025740SJaegeuk Kim SIT_VBLOCK_MAP_SIZE);
4634f0248ba6SJaegeuk Kim goto init_discard_map_done;
4635f0248ba6SJaegeuk Kim }
4636f0248ba6SJaegeuk Kim memcpy(se->discard_map, se->cur_valid_map,
46371f43e2adSChao Yu SIT_VBLOCK_MAP_SIZE);
4638f0248ba6SJaegeuk Kim sbi->discard_blks += BLKS_PER_SEG(sbi) -
46393e025740SJaegeuk Kim se->valid_blocks;
4640f0248ba6SJaegeuk Kim init_discard_map_done:
46412c70c5e3SChao Yu if (__is_large_section(sbi))
4642d600af23SChao Yu get_sec_entry(sbi, start)->valid_blocks +=
4643d600af23SChao Yu se->valid_blocks;
4644351df4b2SJaegeuk Kim }
464574de593aSChao Yu start_blk += readed;
464674de593aSChao Yu } while (start_blk < sit_blk_cnt);
4647d600af23SChao Yu
4648d600af23SChao Yu down_read(&curseg->journal_rwsem);
4649d600af23SChao Yu for (i = 0; i < sits_in_cursum(journal); i++) {
4650d600af23SChao Yu unsigned int old_valid_blocks;
4651d600af23SChao Yu
4652d600af23SChao Yu start = le32_to_cpu(segno_in_journal(journal, i));
4653b2ca374fSJaegeuk Kim if (start >= MAIN_SEGS(sbi)) {
4654dcbb4c10SJoe Perches f2fs_err(sbi, "Wrong journal entry on segno %u",
4655b2ca374fSJaegeuk Kim start);
465610f966bbSChao Yu err = -EFSCORRUPTED;
465795fa90c9SChao Yu f2fs_handle_error(sbi, ERROR_CORRUPTED_JOURNAL);
4658b2ca374fSJaegeuk Kim break;
4659b2ca374fSJaegeuk Kim }
4660b2ca374fSJaegeuk Kim
4661d600af23SChao Yu se = &sit_i->sentries[start];
4662d600af23SChao Yu sit = sit_in_journal(journal, i);
4663d600af23SChao Yu
4664d600af23SChao Yu old_valid_blocks = se->valid_blocks;
46656b8beca0SChao Yu
46666b8beca0SChao Yu sit_valid_blocks[SE_PAGETYPE(se)] -= old_valid_blocks;
4667d600af23SChao Yu
4668c39a1b34SJaegeuk Kim err = check_block_count(sbi, start, &sit);
4669c39a1b34SJaegeuk Kim if (err)
4670c39a1b34SJaegeuk Kim break;
4671d600af23SChao Yu seg_info_from_raw_sit(se, &sit);
46726b8beca0SChao Yu
467309beadf2SChao Yu if (se->type >= NR_PERSISTENT_LOG) {
467409beadf2SChao Yu f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
467509beadf2SChao Yu se->type, start);
467609beadf2SChao Yu err = -EFSCORRUPTED;
467795fa90c9SChao Yu f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
467809beadf2SChao Yu break;
467909beadf2SChao Yu }
468009beadf2SChao Yu
46816b8beca0SChao Yu sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
4682d600af23SChao Yu
46834f993264SChao Yu if (f2fs_block_unit_discard(sbi)) {
46841f43e2adSChao Yu if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
46857d20c8abSChao Yu memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
46861f43e2adSChao Yu } else {
4687d600af23SChao Yu memcpy(se->discard_map, se->cur_valid_map,
4688d600af23SChao Yu SIT_VBLOCK_MAP_SIZE);
4689a9af3fdcSChao Yu sbi->discard_blks += old_valid_blocks;
4690a9af3fdcSChao Yu sbi->discard_blks -= se->valid_blocks;
4691d600af23SChao Yu }
46924f993264SChao Yu }
4693d600af23SChao Yu
46942c70c5e3SChao Yu if (__is_large_section(sbi)) {
4695d600af23SChao Yu get_sec_entry(sbi, start)->valid_blocks +=
4696a9af3fdcSChao Yu se->valid_blocks;
4697a9af3fdcSChao Yu get_sec_entry(sbi, start)->valid_blocks -=
4698a9af3fdcSChao Yu old_valid_blocks;
4699a9af3fdcSChao Yu }
4700d600af23SChao Yu }
4701d600af23SChao Yu up_read(&curseg->journal_rwsem);
47028a29c126SJaegeuk Kim
47036b8beca0SChao Yu if (err)
47046b8beca0SChao Yu return err;
47056b8beca0SChao Yu
47066b8beca0SChao Yu if (sit_valid_blocks[NODE] != valid_node_count(sbi)) {
4707dcbb4c10SJoe Perches f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
47086b8beca0SChao Yu sit_valid_blocks[NODE], valid_node_count(sbi));
470995fa90c9SChao Yu f2fs_handle_error(sbi, ERROR_INCONSISTENT_NODE_COUNT);
47106b8beca0SChao Yu return -EFSCORRUPTED;
47118a29c126SJaegeuk Kim }
47128a29c126SJaegeuk Kim
47136b8beca0SChao Yu if (sit_valid_blocks[DATA] + sit_valid_blocks[NODE] >
47146b8beca0SChao Yu valid_user_blocks(sbi)) {
47156b8beca0SChao Yu f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u",
47166b8beca0SChao Yu sit_valid_blocks[DATA], sit_valid_blocks[NODE],
47176b8beca0SChao Yu valid_user_blocks(sbi));
471895fa90c9SChao Yu f2fs_handle_error(sbi, ERROR_INCONSISTENT_BLOCK_COUNT);
47196b8beca0SChao Yu return -EFSCORRUPTED;
47206b8beca0SChao Yu }
47216b8beca0SChao Yu
47226b8beca0SChao Yu return 0;
4723351df4b2SJaegeuk Kim }
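/*
 * build_sit_entries() fills the in-memory seg entries in two passes:
 * first it walks the on-disk SIT blocks (read ahead BIO_MAX_VECS pages
 * at a time), then it applies the entries cached in the SIT journal of
 * the cold-data curseg, which supersede what was read from disk. Both
 * passes validate each raw entry with check_block_count() and reject
 * segment types >= NR_PERSISTENT_LOG. Finally the per-type valid block
 * totals are cross-checked against the checkpoint's node and user block
 * counts, and a mismatch is reported as -EFSCORRUPTED.
 */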
4724351df4b2SJaegeuk Kim
init_free_segmap(struct f2fs_sb_info * sbi)4725351df4b2SJaegeuk Kim static void init_free_segmap(struct f2fs_sb_info *sbi)
4726351df4b2SJaegeuk Kim {
4727351df4b2SJaegeuk Kim unsigned int start;
4728351df4b2SJaegeuk Kim int type;
4729de881df9SAravind Ramesh struct seg_entry *sentry;
4730351df4b2SJaegeuk Kim
47317cd8558bSJaegeuk Kim for (start = 0; start < MAIN_SEGS(sbi); start++) {
4732de881df9SAravind Ramesh if (f2fs_usable_blks_in_seg(sbi, start) == 0)
4733de881df9SAravind Ramesh continue;
4734de881df9SAravind Ramesh sentry = get_seg_entry(sbi, start);
4735351df4b2SJaegeuk Kim if (!sentry->valid_blocks)
4736351df4b2SJaegeuk Kim __set_free(sbi, start);
4737c79b7ff1SJaegeuk Kim else
4738c79b7ff1SJaegeuk Kim SIT_I(sbi)->written_valid_blocks +=
4739c79b7ff1SJaegeuk Kim sentry->valid_blocks;
4740351df4b2SJaegeuk Kim }
4741351df4b2SJaegeuk Kim
4742351df4b2SJaegeuk Kim /* mark the current segments as in use */
4743351df4b2SJaegeuk Kim for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
4744351df4b2SJaegeuk Kim struct curseg_info *curseg_t = CURSEG_I(sbi, type);
47455f029c04SYi Zhuang
4746351df4b2SJaegeuk Kim __set_test_and_inuse(sbi, curseg_t->segno);
4747351df4b2SJaegeuk Kim }
4748351df4b2SJaegeuk Kim }
4749351df4b2SJaegeuk Kim
init_dirty_segmap(struct f2fs_sb_info * sbi)4750351df4b2SJaegeuk Kim static void init_dirty_segmap(struct f2fs_sb_info *sbi)
4751351df4b2SJaegeuk Kim {
4752351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4753351df4b2SJaegeuk Kim struct free_segmap_info *free_i = FREE_I(sbi);
4754da52f8adSJack Qiu unsigned int segno = 0, offset = 0, secno;
4755de881df9SAravind Ramesh block_t valid_blocks, usable_blks_in_seg;
4756351df4b2SJaegeuk Kim
47578736fbf0SNamjae Jeon while (1) {
4758351df4b2SJaegeuk Kim /* find dirty segment based on free segmap */
47597cd8558bSJaegeuk Kim segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
47607cd8558bSJaegeuk Kim if (segno >= MAIN_SEGS(sbi))
4761351df4b2SJaegeuk Kim break;
4762351df4b2SJaegeuk Kim offset = segno + 1;
4763302bd348SJaegeuk Kim valid_blocks = get_valid_blocks(sbi, segno, false);
4764de881df9SAravind Ramesh usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
4765de881df9SAravind Ramesh if (valid_blocks == usable_blks_in_seg || !valid_blocks)
4766351df4b2SJaegeuk Kim continue;
4767de881df9SAravind Ramesh if (valid_blocks > usable_blks_in_seg) {
4768ec325b52SJaegeuk Kim f2fs_bug_on(sbi, 1);
4769ec325b52SJaegeuk Kim continue;
4770ec325b52SJaegeuk Kim }
4771351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock);
4772351df4b2SJaegeuk Kim __locate_dirty_segment(sbi, segno, DIRTY);
4773351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock);
4774351df4b2SJaegeuk Kim }
4775da52f8adSJack Qiu
4776da52f8adSJack Qiu if (!__is_large_section(sbi))
4777da52f8adSJack Qiu return;
4778da52f8adSJack Qiu
4779da52f8adSJack Qiu mutex_lock(&dirty_i->seglist_lock);
4780f0248ba6SJaegeuk Kim for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
4781da52f8adSJack Qiu valid_blocks = get_valid_blocks(sbi, segno, true);
4782da52f8adSJack Qiu secno = GET_SEC_FROM_SEG(sbi, segno);
4783da52f8adSJack Qiu
4784074b5ea2SJaegeuk Kim if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi))
4785da52f8adSJack Qiu continue;
4786da52f8adSJack Qiu if (IS_CURSEC(sbi, secno))
4787da52f8adSJack Qiu continue;
4788da52f8adSJack Qiu set_bit(secno, dirty_i->dirty_secmap);
4789da52f8adSJack Qiu }
4790da52f8adSJack Qiu mutex_unlock(&dirty_i->seglist_lock);
4791351df4b2SJaegeuk Kim }
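/*
 * init_dirty_segmap() marks every segment that is only partially valid
 * (some, but not all, of its usable blocks are valid) as DIRTY; fully
 * valid and completely empty segments are skipped. For large-section
 * layouts a second pass additionally sets dirty_secmap bits for
 * partially valid sections that are not open cursegs, presumably so
 * that section-granularity victim selection can find them later.
 */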
4792351df4b2SJaegeuk Kim
init_victim_secmap(struct f2fs_sb_info * sbi)47935ec4e49fSJaegeuk Kim static int init_victim_secmap(struct f2fs_sb_info *sbi)
4794351df4b2SJaegeuk Kim {
4795351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
47967cd8558bSJaegeuk Kim unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4797351df4b2SJaegeuk Kim
4798628b3d14SChao Yu dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
47995ec4e49fSJaegeuk Kim if (!dirty_i->victim_secmap)
4800351df4b2SJaegeuk Kim return -ENOMEM;
480171419129SChao Yu
480271419129SChao Yu dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
480371419129SChao Yu if (!dirty_i->pinned_secmap)
480471419129SChao Yu return -ENOMEM;
480571419129SChao Yu
480671419129SChao Yu dirty_i->pinned_secmap_cnt = 0;
480771419129SChao Yu dirty_i->enable_pin_section = true;
4808351df4b2SJaegeuk Kim return 0;
4809351df4b2SJaegeuk Kim }
4810351df4b2SJaegeuk Kim
build_dirty_segmap(struct f2fs_sb_info * sbi)4811351df4b2SJaegeuk Kim static int build_dirty_segmap(struct f2fs_sb_info *sbi)
4812351df4b2SJaegeuk Kim {
4813351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i;
4814351df4b2SJaegeuk Kim unsigned int bitmap_size, i;
4815351df4b2SJaegeuk Kim
4816351df4b2SJaegeuk Kim /* allocate memory for dirty segments list information */
4817acbf054dSChao Yu dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
4818acbf054dSChao Yu GFP_KERNEL);
4819351df4b2SJaegeuk Kim if (!dirty_i)
4820351df4b2SJaegeuk Kim return -ENOMEM;
4821351df4b2SJaegeuk Kim
4822351df4b2SJaegeuk Kim SM_I(sbi)->dirty_info = dirty_i;
4823351df4b2SJaegeuk Kim mutex_init(&dirty_i->seglist_lock);
4824351df4b2SJaegeuk Kim
48257cd8558bSJaegeuk Kim bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4826351df4b2SJaegeuk Kim
4827351df4b2SJaegeuk Kim for (i = 0; i < NR_DIRTY_TYPE; i++) {
4828628b3d14SChao Yu dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
4829628b3d14SChao Yu GFP_KERNEL);
4830351df4b2SJaegeuk Kim if (!dirty_i->dirty_segmap[i])
4831351df4b2SJaegeuk Kim return -ENOMEM;
4832351df4b2SJaegeuk Kim }
4833351df4b2SJaegeuk Kim
4834da52f8adSJack Qiu if (__is_large_section(sbi)) {
4835da52f8adSJack Qiu bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4836da52f8adSJack Qiu dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
4837da52f8adSJack Qiu bitmap_size, GFP_KERNEL);
4838da52f8adSJack Qiu if (!dirty_i->dirty_secmap)
4839da52f8adSJack Qiu return -ENOMEM;
4840da52f8adSJack Qiu }
4841da52f8adSJack Qiu
4842351df4b2SJaegeuk Kim init_dirty_segmap(sbi);
48435ec4e49fSJaegeuk Kim return init_victim_secmap(sbi);
4844351df4b2SJaegeuk Kim }
4845351df4b2SJaegeuk Kim
sanity_check_curseg(struct f2fs_sb_info * sbi)4846c854f4d6SChao Yu static int sanity_check_curseg(struct f2fs_sb_info *sbi)
4847c854f4d6SChao Yu {
4848c854f4d6SChao Yu int i;
4849c854f4d6SChao Yu
4850c854f4d6SChao Yu /*
4851c854f4d6SChao Yu * In an LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
4852c854f4d6SChao Yu * in an LFS curseg, all blkaddrs after .next_blkoff should be unused.
4853c854f4d6SChao Yu */
4854d0b9e42aSChao Yu for (i = 0; i < NR_PERSISTENT_LOG; i++) {
4855c854f4d6SChao Yu struct curseg_info *curseg = CURSEG_I(sbi, i);
4856c854f4d6SChao Yu struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
4857c854f4d6SChao Yu unsigned int blkofs = curseg->next_blkoff;
4858c854f4d6SChao Yu
4859a7d9fe3cSJaegeuk Kim if (f2fs_sb_has_readonly(sbi) &&
4860a7d9fe3cSJaegeuk Kim i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
4861a7d9fe3cSJaegeuk Kim continue;
4862a7d9fe3cSJaegeuk Kim
4863093749e2SChao Yu sanity_check_seg_type(sbi, curseg->seg_type);
4864093749e2SChao Yu
4865f41ee8b9SChao Yu if (curseg->alloc_type != LFS && curseg->alloc_type != SSR) {
4866f41ee8b9SChao Yu f2fs_err(sbi,
4867f41ee8b9SChao Yu "Current segment has invalid alloc_type:%d",
4868f41ee8b9SChao Yu curseg->alloc_type);
486995fa90c9SChao Yu f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
4870f41ee8b9SChao Yu return -EFSCORRUPTED;
4871f41ee8b9SChao Yu }
4872f41ee8b9SChao Yu
4873c854f4d6SChao Yu if (f2fs_test_bit(blkofs, se->cur_valid_map))
4874c854f4d6SChao Yu goto out;
4875c854f4d6SChao Yu
4876c854f4d6SChao Yu if (curseg->alloc_type == SSR)
4877c854f4d6SChao Yu continue;
4878c854f4d6SChao Yu
4879f0248ba6SJaegeuk Kim for (blkofs += 1; blkofs < BLKS_PER_SEG(sbi); blkofs++) {
4880c854f4d6SChao Yu if (!f2fs_test_bit(blkofs, se->cur_valid_map))
4881c854f4d6SChao Yu continue;
4882c854f4d6SChao Yu out:
4883dcbb4c10SJoe Perches f2fs_err(sbi,
4884dcbb4c10SJoe Perches "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
4885c854f4d6SChao Yu i, curseg->segno, curseg->alloc_type,
4886c854f4d6SChao Yu curseg->next_blkoff, blkofs);
488795fa90c9SChao Yu f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
488810f966bbSChao Yu return -EFSCORRUPTED;
4889c854f4d6SChao Yu }
4890c854f4d6SChao Yu }
4891c854f4d6SChao Yu return 0;
4892c854f4d6SChao Yu }
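/*
 * A small worked example of the invariant above (hypothetical 8-block
 * segment with block offsets 0-2 valid in cur_valid_map):
 *
 *   - LFS curseg, next_blkoff = 3: offset 3 and everything after it is
 *     unused, so the check passes.
 *   - LFS curseg, next_blkoff = 2: offset 2 is still valid, so the
 *     curseg is reported as inconsistent (-EFSCORRUPTED).
 *   - SSR curseg, next_blkoff = 3 with offset 5 also valid: only the
 *     block at next_blkoff has to be unused, so this still passes.
 */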
4893c854f4d6SChao Yu
4894c426d991SShin'ichiro Kawasaki #ifdef CONFIG_BLK_DEV_ZONED
4895c426d991SShin'ichiro Kawasaki
check_zone_write_pointer(struct f2fs_sb_info * sbi,struct f2fs_dev_info * fdev,struct blk_zone * zone)4896d508c94eSShin'ichiro Kawasaki static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
4897d508c94eSShin'ichiro Kawasaki struct f2fs_dev_info *fdev,
4898d508c94eSShin'ichiro Kawasaki struct blk_zone *zone)
4899d508c94eSShin'ichiro Kawasaki {
4900d508c94eSShin'ichiro Kawasaki unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
4901d508c94eSShin'ichiro Kawasaki block_t zone_block, wp_block, last_valid_block;
4902579c7e41SJaegeuk Kim unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4903d508c94eSShin'ichiro Kawasaki int i, s, b, ret;
4904d508c94eSShin'ichiro Kawasaki struct seg_entry *se;
4905d508c94eSShin'ichiro Kawasaki
4906d508c94eSShin'ichiro Kawasaki if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4907d508c94eSShin'ichiro Kawasaki return 0;
4908d508c94eSShin'ichiro Kawasaki
4909579c7e41SJaegeuk Kim wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
4910d508c94eSShin'ichiro Kawasaki wp_segno = GET_SEGNO(sbi, wp_block);
4911d508c94eSShin'ichiro Kawasaki wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4912579c7e41SJaegeuk Kim zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
4913d508c94eSShin'ichiro Kawasaki zone_segno = GET_SEGNO(sbi, zone_block);
4914d508c94eSShin'ichiro Kawasaki zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
4915d508c94eSShin'ichiro Kawasaki
4916d508c94eSShin'ichiro Kawasaki if (zone_segno >= MAIN_SEGS(sbi))
4917d508c94eSShin'ichiro Kawasaki return 0;
4918d508c94eSShin'ichiro Kawasaki
4919d508c94eSShin'ichiro Kawasaki /*
4920d508c94eSShin'ichiro Kawasaki * Skip checking zones that the cursegs point to, since
4921d508c94eSShin'ichiro Kawasaki * fix_curseg_write_pointer() checks them.
4922d508c94eSShin'ichiro Kawasaki */
4923d508c94eSShin'ichiro Kawasaki for (i = 0; i < NO_CHECK_TYPE; i++)
4924d508c94eSShin'ichiro Kawasaki if (zone_secno == GET_SEC_FROM_SEG(sbi,
4925d508c94eSShin'ichiro Kawasaki CURSEG_I(sbi, i)->segno))
4926d508c94eSShin'ichiro Kawasaki return 0;
4927d508c94eSShin'ichiro Kawasaki
4928d508c94eSShin'ichiro Kawasaki /*
4929d508c94eSShin'ichiro Kawasaki * Get last valid block of the zone.
4930d508c94eSShin'ichiro Kawasaki */
4931d508c94eSShin'ichiro Kawasaki last_valid_block = zone_block - 1;
4932d508c94eSShin'ichiro Kawasaki for (s = sbi->segs_per_sec - 1; s >= 0; s--) {
4933d508c94eSShin'ichiro Kawasaki segno = zone_segno + s;
4934d508c94eSShin'ichiro Kawasaki se = get_seg_entry(sbi, segno);
4935d508c94eSShin'ichiro Kawasaki for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
4936d508c94eSShin'ichiro Kawasaki if (f2fs_test_bit(b, se->cur_valid_map)) {
4937d508c94eSShin'ichiro Kawasaki last_valid_block = START_BLOCK(sbi, segno) + b;
4938d508c94eSShin'ichiro Kawasaki break;
4939d508c94eSShin'ichiro Kawasaki }
4940d508c94eSShin'ichiro Kawasaki if (last_valid_block >= zone_block)
4941d508c94eSShin'ichiro Kawasaki break;
4942d508c94eSShin'ichiro Kawasaki }
4943d508c94eSShin'ichiro Kawasaki
4944c9667b19SDaeho Jeong /*
4945c9667b19SDaeho Jeong * The write pointer matches the valid blocks, or it
4946c9667b19SDaeho Jeong * already points to the end of the zone.
4947c9667b19SDaeho Jeong */
4948c9667b19SDaeho Jeong if ((last_valid_block + 1 == wp_block) ||
4949c9667b19SDaeho Jeong (zone->wp == zone->start + zone->len))
4950d508c94eSShin'ichiro Kawasaki return 0;
4951d508c94eSShin'ichiro Kawasaki
495204abeb69SDaeho Jeong if (last_valid_block + 1 == zone_block) {
4953d508c94eSShin'ichiro Kawasaki /*
495404abeb69SDaeho Jeong * If there is no valid block in the zone and the write pointer
495504abeb69SDaeho Jeong * is not at the zone start, reset the write pointer.
4956d508c94eSShin'ichiro Kawasaki */
4957d508c94eSShin'ichiro Kawasaki f2fs_notice(sbi,
4958d508c94eSShin'ichiro Kawasaki "Zone without valid block has non-zero write "
4959d508c94eSShin'ichiro Kawasaki "pointer. Reset the write pointer: wp[0x%x,0x%x]",
4960d508c94eSShin'ichiro Kawasaki wp_segno, wp_blkoff);
4961d508c94eSShin'ichiro Kawasaki ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
4962579c7e41SJaegeuk Kim zone->len >> log_sectors_per_block);
496304abeb69SDaeho Jeong if (ret)
4964d508c94eSShin'ichiro Kawasaki f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4965d508c94eSShin'ichiro Kawasaki fdev->path, ret);
496604abeb69SDaeho Jeong
4967d508c94eSShin'ichiro Kawasaki return ret;
4968d508c94eSShin'ichiro Kawasaki }
4969d508c94eSShin'ichiro Kawasaki
497004abeb69SDaeho Jeong /*
497104abeb69SDaeho Jeong * If there are valid blocks and the write pointer doesn't
497204abeb69SDaeho Jeong * match them, report the inconsistency and fill the zone up to
497304abeb69SDaeho Jeong * its end to close it. The inconsistency does not cause a write
497404abeb69SDaeho Jeong * error because the zone will not be selected for write
497504abeb69SDaeho Jeong * operations until it gets discarded.
497604abeb69SDaeho Jeong */
497704abeb69SDaeho Jeong f2fs_notice(sbi, "Valid blocks are not aligned with write pointer: "
497804abeb69SDaeho Jeong "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
497904abeb69SDaeho Jeong GET_SEGNO(sbi, last_valid_block),
498004abeb69SDaeho Jeong GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
498104abeb69SDaeho Jeong wp_segno, wp_blkoff);
498204abeb69SDaeho Jeong
49833b716612SDaeho Jeong ret = blkdev_zone_mgmt(fdev->bdev, REQ_OP_ZONE_FINISH,
49843b716612SDaeho Jeong zone->start, zone->len, GFP_NOFS);
49853b716612SDaeho Jeong if (ret == -EOPNOTSUPP) {
498604abeb69SDaeho Jeong ret = blkdev_issue_zeroout(fdev->bdev, zone->wp,
498704abeb69SDaeho Jeong zone->len - (zone->wp - zone->start),
498804abeb69SDaeho Jeong GFP_NOFS, 0);
498904abeb69SDaeho Jeong if (ret)
499004abeb69SDaeho Jeong f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)",
499104abeb69SDaeho Jeong fdev->path, ret);
49923b716612SDaeho Jeong } else if (ret) {
49933b716612SDaeho Jeong f2fs_err(sbi, "Finishing zone failed: %s (errno=%d)",
49943b716612SDaeho Jeong fdev->path, ret);
49953b716612SDaeho Jeong }
499604abeb69SDaeho Jeong
499704abeb69SDaeho Jeong return ret;
4998d508c94eSShin'ichiro Kawasaki }
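/*
 * Summary of the cases handled above: if the write pointer sits right
 * after the last valid block, or already at the zone end, the zone is
 * consistent and nothing is done. If the zone holds no valid blocks but
 * the write pointer is not at the zone start, the zone is reset via
 * __f2fs_issue_discard_zone(). Otherwise the valid blocks and the write
 * pointer disagree, so the zone is finished (REQ_OP_ZONE_FINISH), with a
 * zeroout of the remaining LBAs as a fallback when FINISH is not
 * supported; the zone then stays out of the write path until discarded.
 */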
4999d508c94eSShin'ichiro Kawasaki
get_target_zoned_dev(struct f2fs_sb_info * sbi,block_t zone_blkaddr)5000c426d991SShin'ichiro Kawasaki static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
5001c426d991SShin'ichiro Kawasaki block_t zone_blkaddr)
5002c426d991SShin'ichiro Kawasaki {
5003c426d991SShin'ichiro Kawasaki int i;
5004c426d991SShin'ichiro Kawasaki
5005c426d991SShin'ichiro Kawasaki for (i = 0; i < sbi->s_ndevs; i++) {
5006c426d991SShin'ichiro Kawasaki if (!bdev_is_zoned(FDEV(i).bdev))
5007c426d991SShin'ichiro Kawasaki continue;
5008c426d991SShin'ichiro Kawasaki if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
5009c426d991SShin'ichiro Kawasaki zone_blkaddr <= FDEV(i).end_blk))
5010c426d991SShin'ichiro Kawasaki return &FDEV(i);
5011c426d991SShin'ichiro Kawasaki }
5012c426d991SShin'ichiro Kawasaki
5013c426d991SShin'ichiro Kawasaki return NULL;
5014c426d991SShin'ichiro Kawasaki }
5015c426d991SShin'ichiro Kawasaki
report_one_zone_cb(struct blk_zone * zone,unsigned int idx,void * data)5016c426d991SShin'ichiro Kawasaki static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
50175f029c04SYi Zhuang void *data)
50185f029c04SYi Zhuang {
5019c426d991SShin'ichiro Kawasaki memcpy(data, zone, sizeof(struct blk_zone));
5020c426d991SShin'ichiro Kawasaki return 0;
5021c426d991SShin'ichiro Kawasaki }
5022c426d991SShin'ichiro Kawasaki
fix_curseg_write_pointer(struct f2fs_sb_info * sbi,int type)5023c426d991SShin'ichiro Kawasaki static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
5024c426d991SShin'ichiro Kawasaki {
5025c426d991SShin'ichiro Kawasaki struct curseg_info *cs = CURSEG_I(sbi, type);
5026c426d991SShin'ichiro Kawasaki struct f2fs_dev_info *zbd;
5027c426d991SShin'ichiro Kawasaki struct blk_zone zone;
5028c426d991SShin'ichiro Kawasaki unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
5029c426d991SShin'ichiro Kawasaki block_t cs_zone_block, wp_block;
5030579c7e41SJaegeuk Kim unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
5031c426d991SShin'ichiro Kawasaki sector_t zone_sector;
5032c426d991SShin'ichiro Kawasaki int err;
5033c426d991SShin'ichiro Kawasaki
5034c426d991SShin'ichiro Kawasaki cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
5035c426d991SShin'ichiro Kawasaki cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
5036c426d991SShin'ichiro Kawasaki
5037c426d991SShin'ichiro Kawasaki zbd = get_target_zoned_dev(sbi, cs_zone_block);
5038c426d991SShin'ichiro Kawasaki if (!zbd)
5039c426d991SShin'ichiro Kawasaki return 0;
5040c426d991SShin'ichiro Kawasaki
5041c426d991SShin'ichiro Kawasaki /* report zone for the sector the curseg points to */
5042579c7e41SJaegeuk Kim zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
5043579c7e41SJaegeuk Kim << log_sectors_per_block;
5044c426d991SShin'ichiro Kawasaki err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
5045c426d991SShin'ichiro Kawasaki report_one_zone_cb, &zone);
5046c426d991SShin'ichiro Kawasaki if (err != 1) {
5047c426d991SShin'ichiro Kawasaki f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
5048c426d991SShin'ichiro Kawasaki zbd->path, err);
5049c426d991SShin'ichiro Kawasaki return err;
5050c426d991SShin'ichiro Kawasaki }
5051c426d991SShin'ichiro Kawasaki
5052c426d991SShin'ichiro Kawasaki if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
5053c426d991SShin'ichiro Kawasaki return 0;
5054c426d991SShin'ichiro Kawasaki
5055579c7e41SJaegeuk Kim wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
5056c426d991SShin'ichiro Kawasaki wp_segno = GET_SEGNO(sbi, wp_block);
5057c426d991SShin'ichiro Kawasaki wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
5058579c7e41SJaegeuk Kim wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
5059c426d991SShin'ichiro Kawasaki
5060c426d991SShin'ichiro Kawasaki if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
5061c426d991SShin'ichiro Kawasaki wp_sector_off == 0)
5062c426d991SShin'ichiro Kawasaki return 0;
5063c426d991SShin'ichiro Kawasaki
5064c426d991SShin'ichiro Kawasaki f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
5065c426d991SShin'ichiro Kawasaki "curseg[0x%x,0x%x] wp[0x%x,0x%x]",
5066c426d991SShin'ichiro Kawasaki type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff);
5067c426d991SShin'ichiro Kawasaki
5068c426d991SShin'ichiro Kawasaki f2fs_notice(sbi, "Assign new section to curseg[%d]: "
5069c426d991SShin'ichiro Kawasaki "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff);
5070509f1010SChao Yu
5071509f1010SChao Yu f2fs_allocate_new_section(sbi, type, true);
5072c426d991SShin'ichiro Kawasaki
5073d508c94eSShin'ichiro Kawasaki /* check consistency of the zone the curseg pointed to */
5074d508c94eSShin'ichiro Kawasaki if (check_zone_write_pointer(sbi, zbd, &zone))
5075d508c94eSShin'ichiro Kawasaki return -EIO;
5076d508c94eSShin'ichiro Kawasaki
5077c426d991SShin'ichiro Kawasaki /* check newly assigned zone */
5078c426d991SShin'ichiro Kawasaki cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
5079c426d991SShin'ichiro Kawasaki cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
5080c426d991SShin'ichiro Kawasaki
5081c426d991SShin'ichiro Kawasaki zbd = get_target_zoned_dev(sbi, cs_zone_block);
5082c426d991SShin'ichiro Kawasaki if (!zbd)
5083c426d991SShin'ichiro Kawasaki return 0;
5084c426d991SShin'ichiro Kawasaki
5085579c7e41SJaegeuk Kim zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
5086579c7e41SJaegeuk Kim << log_sectors_per_block;
5087c426d991SShin'ichiro Kawasaki err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
5088c426d991SShin'ichiro Kawasaki report_one_zone_cb, &zone);
5089c426d991SShin'ichiro Kawasaki if (err != 1) {
5090c426d991SShin'ichiro Kawasaki f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
5091c426d991SShin'ichiro Kawasaki zbd->path, err);
5092c426d991SShin'ichiro Kawasaki return err;
5093c426d991SShin'ichiro Kawasaki }
5094c426d991SShin'ichiro Kawasaki
5095c426d991SShin'ichiro Kawasaki if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
5096c426d991SShin'ichiro Kawasaki return 0;
5097c426d991SShin'ichiro Kawasaki
5098c426d991SShin'ichiro Kawasaki if (zone.wp != zone.start) {
5099c426d991SShin'ichiro Kawasaki f2fs_notice(sbi,
5100c426d991SShin'ichiro Kawasaki "New zone for curseg[%d] is not yet discarded. "
5101c426d991SShin'ichiro Kawasaki "Reset the zone: curseg[0x%x,0x%x]",
5102c426d991SShin'ichiro Kawasaki type, cs->segno, cs->next_blkoff);
51031ac3d037SDaeho Jeong err = __f2fs_issue_discard_zone(sbi, zbd->bdev, cs_zone_block,
5104579c7e41SJaegeuk Kim zone.len >> log_sectors_per_block);
5105c426d991SShin'ichiro Kawasaki if (err) {
5106c426d991SShin'ichiro Kawasaki f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
5107c426d991SShin'ichiro Kawasaki zbd->path, err);
5108c426d991SShin'ichiro Kawasaki return err;
5109c426d991SShin'ichiro Kawasaki }
5110c426d991SShin'ichiro Kawasaki }
5111c426d991SShin'ichiro Kawasaki
5112c426d991SShin'ichiro Kawasaki return 0;
5113c426d991SShin'ichiro Kawasaki }
5114c426d991SShin'ichiro Kawasaki
f2fs_fix_curseg_write_pointer(struct f2fs_sb_info * sbi)5115c426d991SShin'ichiro Kawasaki int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5116c426d991SShin'ichiro Kawasaki {
5117c426d991SShin'ichiro Kawasaki int i, ret;
5118c426d991SShin'ichiro Kawasaki
5119d0b9e42aSChao Yu for (i = 0; i < NR_PERSISTENT_LOG; i++) {
5120c426d991SShin'ichiro Kawasaki ret = fix_curseg_write_pointer(sbi, i);
5121c426d991SShin'ichiro Kawasaki if (ret)
5122c426d991SShin'ichiro Kawasaki return ret;
5123c426d991SShin'ichiro Kawasaki }
5124c426d991SShin'ichiro Kawasaki
5125c426d991SShin'ichiro Kawasaki return 0;
5126c426d991SShin'ichiro Kawasaki }
5127d508c94eSShin'ichiro Kawasaki
5128d508c94eSShin'ichiro Kawasaki struct check_zone_write_pointer_args {
5129d508c94eSShin'ichiro Kawasaki struct f2fs_sb_info *sbi;
5130d508c94eSShin'ichiro Kawasaki struct f2fs_dev_info *fdev;
5131d508c94eSShin'ichiro Kawasaki };
5132d508c94eSShin'ichiro Kawasaki
check_zone_write_pointer_cb(struct blk_zone * zone,unsigned int idx,void * data)5133d508c94eSShin'ichiro Kawasaki static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
51345f029c04SYi Zhuang void *data)
51355f029c04SYi Zhuang {
5136d508c94eSShin'ichiro Kawasaki struct check_zone_write_pointer_args *args;
51375f029c04SYi Zhuang
5138d508c94eSShin'ichiro Kawasaki args = (struct check_zone_write_pointer_args *)data;
5139d508c94eSShin'ichiro Kawasaki
5140d508c94eSShin'ichiro Kawasaki return check_zone_write_pointer(args->sbi, args->fdev, zone);
5141d508c94eSShin'ichiro Kawasaki }
5142d508c94eSShin'ichiro Kawasaki
f2fs_check_write_pointer(struct f2fs_sb_info * sbi)5143d508c94eSShin'ichiro Kawasaki int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5144d508c94eSShin'ichiro Kawasaki {
5145d508c94eSShin'ichiro Kawasaki int i, ret;
5146d508c94eSShin'ichiro Kawasaki struct check_zone_write_pointer_args args;
5147d508c94eSShin'ichiro Kawasaki
5148d508c94eSShin'ichiro Kawasaki for (i = 0; i < sbi->s_ndevs; i++) {
5149d508c94eSShin'ichiro Kawasaki if (!bdev_is_zoned(FDEV(i).bdev))
5150d508c94eSShin'ichiro Kawasaki continue;
5151d508c94eSShin'ichiro Kawasaki
5152d508c94eSShin'ichiro Kawasaki args.sbi = sbi;
5153d508c94eSShin'ichiro Kawasaki args.fdev = &FDEV(i);
5154d508c94eSShin'ichiro Kawasaki ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
5155d508c94eSShin'ichiro Kawasaki check_zone_write_pointer_cb, &args);
5156d508c94eSShin'ichiro Kawasaki if (ret < 0)
5157d508c94eSShin'ichiro Kawasaki return ret;
5158d508c94eSShin'ichiro Kawasaki }
5159d508c94eSShin'ichiro Kawasaki
5160d508c94eSShin'ichiro Kawasaki return 0;
5161d508c94eSShin'ichiro Kawasaki }
5162de881df9SAravind Ramesh
5163de881df9SAravind Ramesh /*
5164de881df9SAravind Ramesh * Return the number of usable blocks in a segment. For segments fully
5165de881df9SAravind Ramesh * contained within a sequential zone capacity or within a conventional
5166de881df9SAravind Ramesh * zone, this is the full number of blocks in a segment. For segments
5167de881df9SAravind Ramesh * partially contained in a sequential zone capacity, only the blocks up
5168de881df9SAravind Ramesh * to the zone capacity are counted. Segments starting at or beyond the
5169de881df9SAravind Ramesh * zone capacity have no usable blocks, so 0 is returned.
5170de881df9SAravind Ramesh */
f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info * sbi,unsigned int segno)5171de881df9SAravind Ramesh static inline unsigned int f2fs_usable_zone_blks_in_seg(
5172de881df9SAravind Ramesh struct f2fs_sb_info *sbi, unsigned int segno)
5173de881df9SAravind Ramesh {
5174de881df9SAravind Ramesh block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
51750b37ed21SJaegeuk Kim unsigned int secno;
5176de881df9SAravind Ramesh
5177b771aadcSJaegeuk Kim if (!sbi->unusable_blocks_per_sec)
5178f0248ba6SJaegeuk Kim return BLKS_PER_SEG(sbi);
5179de881df9SAravind Ramesh
51800b37ed21SJaegeuk Kim secno = GET_SEC_FROM_SEG(sbi, segno);
51810b37ed21SJaegeuk Kim seg_start = START_BLOCK(sbi, segno);
5182de881df9SAravind Ramesh sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
5183b771aadcSJaegeuk Kim sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);
5184de881df9SAravind Ramesh
5185de881df9SAravind Ramesh /*
5186de881df9SAravind Ramesh * If the segment starts before the zone capacity limit and spans
5187de881df9SAravind Ramesh * beyond it, the usable blocks run from the segment start up to
5188de881df9SAravind Ramesh * the capacity limit. If the segment starts at or after the zone
5189de881df9SAravind Ramesh * capacity, there are no usable blocks.
5190de881df9SAravind Ramesh */
5191de881df9SAravind Ramesh if (seg_start >= sec_cap_blkaddr)
5192de881df9SAravind Ramesh return 0;
5193f0248ba6SJaegeuk Kim if (seg_start + BLKS_PER_SEG(sbi) > sec_cap_blkaddr)
5194de881df9SAravind Ramesh return sec_cap_blkaddr - seg_start;
5195de881df9SAravind Ramesh
5196f0248ba6SJaegeuk Kim return BLKS_PER_SEG(sbi);
5197de881df9SAravind Ramesh }
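/*
 * Illustrative sketch only, not something f2fs uses: the same clamping
 * as f2fs_usable_zone_blks_in_seg() above, restated over plain block
 * numbers so the arithmetic is easy to follow. The name and the example
 * numbers below are hypothetical.
 */
static inline unsigned int zone_cap_usable_blks_example(unsigned int seg_start,
					unsigned int blks_per_seg,
					unsigned int sec_cap_end)
{
	if (seg_start >= sec_cap_end)
		return 0;			/* segment starts at/after the capacity */
	if (seg_start + blks_per_seg > sec_cap_end)
		return sec_cap_end - seg_start;	/* segment straddles the capacity */
	return blks_per_seg;			/* segment is fully usable */
}

/*
 * With blks_per_seg = 512 and sec_cap_end = 1800 (hypothetical numbers),
 * segments starting at 0 or 1024 give 512, a segment starting at 1536
 * gives 264, and any segment starting at or beyond 1800 gives 0.
 */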
5198c426d991SShin'ichiro Kawasaki #else
f2fs_fix_curseg_write_pointer(struct f2fs_sb_info * sbi)5199c426d991SShin'ichiro Kawasaki int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5200c426d991SShin'ichiro Kawasaki {
5201c426d991SShin'ichiro Kawasaki return 0;
5202c426d991SShin'ichiro Kawasaki }
5203d508c94eSShin'ichiro Kawasaki
f2fs_check_write_pointer(struct f2fs_sb_info * sbi)5204d508c94eSShin'ichiro Kawasaki int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5205d508c94eSShin'ichiro Kawasaki {
5206d508c94eSShin'ichiro Kawasaki return 0;
5207d508c94eSShin'ichiro Kawasaki }
5208de881df9SAravind Ramesh
f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info * sbi,unsigned int segno)5209de881df9SAravind Ramesh static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
5210de881df9SAravind Ramesh unsigned int segno)
5211de881df9SAravind Ramesh {
5212de881df9SAravind Ramesh return 0;
5213de881df9SAravind Ramesh }
5214de881df9SAravind Ramesh
5215c426d991SShin'ichiro Kawasaki #endif
f2fs_usable_blks_in_seg(struct f2fs_sb_info * sbi,unsigned int segno)5216de881df9SAravind Ramesh unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
5217de881df9SAravind Ramesh unsigned int segno)
5218de881df9SAravind Ramesh {
5219de881df9SAravind Ramesh if (f2fs_sb_has_blkzoned(sbi))
5220de881df9SAravind Ramesh return f2fs_usable_zone_blks_in_seg(sbi, segno);
5221de881df9SAravind Ramesh
5222f0248ba6SJaegeuk Kim return BLKS_PER_SEG(sbi);
5223de881df9SAravind Ramesh }
5224de881df9SAravind Ramesh
f2fs_usable_segs_in_sec(struct f2fs_sb_info * sbi,unsigned int segno)5225de881df9SAravind Ramesh unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
5226de881df9SAravind Ramesh unsigned int segno)
5227de881df9SAravind Ramesh {
5228de881df9SAravind Ramesh if (f2fs_sb_has_blkzoned(sbi))
52290b37ed21SJaegeuk Kim return CAP_SEGS_PER_SEC(sbi);
5230de881df9SAravind Ramesh
5231f0248ba6SJaegeuk Kim return SEGS_PER_SEC(sbi);
5232de881df9SAravind Ramesh }
5233c426d991SShin'ichiro Kawasaki
52340a8165d7SJaegeuk Kim /*
5235351df4b2SJaegeuk Kim * Update the min and max modified times used by the cost-benefit GC algorithm
5236351df4b2SJaegeuk Kim */
init_min_max_mtime(struct f2fs_sb_info * sbi)5237351df4b2SJaegeuk Kim static void init_min_max_mtime(struct f2fs_sb_info *sbi)
5238351df4b2SJaegeuk Kim {
5239351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi);
5240351df4b2SJaegeuk Kim unsigned int segno;
5241351df4b2SJaegeuk Kim
52423d26fa6bSChao Yu down_write(&sit_i->sentry_lock);
5243351df4b2SJaegeuk Kim
52445ad25442SChao Yu sit_i->min_mtime = ULLONG_MAX;
5245351df4b2SJaegeuk Kim
5246f0248ba6SJaegeuk Kim for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
5247351df4b2SJaegeuk Kim unsigned int i;
5248351df4b2SJaegeuk Kim unsigned long long mtime = 0;
5249351df4b2SJaegeuk Kim
5250f0248ba6SJaegeuk Kim for (i = 0; i < SEGS_PER_SEC(sbi); i++)
5251351df4b2SJaegeuk Kim mtime += get_seg_entry(sbi, segno + i)->mtime;
5252351df4b2SJaegeuk Kim
5253f0248ba6SJaegeuk Kim mtime = div_u64(mtime, SEGS_PER_SEC(sbi));
5254351df4b2SJaegeuk Kim
5255351df4b2SJaegeuk Kim if (sit_i->min_mtime > mtime)
5256351df4b2SJaegeuk Kim sit_i->min_mtime = mtime;
5257351df4b2SJaegeuk Kim }
5258a1f72ac2SChao Yu sit_i->max_mtime = get_mtime(sbi, false);
5259093749e2SChao Yu sit_i->dirty_max_mtime = 0;
52603d26fa6bSChao Yu up_write(&sit_i->sentry_lock);
5261351df4b2SJaegeuk Kim }
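/*
 * init_min_max_mtime() derives min_mtime as the smallest per-section
 * mean of segment modification times: each section's segment mtimes are
 * summed and divided by SEGS_PER_SEC. For a hypothetical section of four
 * segments with mtimes 100, 120, 140 and 160, the section mean is 130,
 * and min_mtime keeps the smallest such mean across all sections.
 * max_mtime is simply the current mtime from get_mtime(sbi, false).
 */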
5262351df4b2SJaegeuk Kim
f2fs_build_segment_manager(struct f2fs_sb_info * sbi)52634d57b86dSChao Yu int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
5264351df4b2SJaegeuk Kim {
5265351df4b2SJaegeuk Kim struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
5266351df4b2SJaegeuk Kim struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
52671042d60fSNamjae Jeon struct f2fs_sm_info *sm_info;
5268351df4b2SJaegeuk Kim int err;
5269351df4b2SJaegeuk Kim
5270acbf054dSChao Yu sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
5271351df4b2SJaegeuk Kim if (!sm_info)
5272351df4b2SJaegeuk Kim return -ENOMEM;
5273351df4b2SJaegeuk Kim
5274351df4b2SJaegeuk Kim /* init sm info */
5275351df4b2SJaegeuk Kim sbi->sm_info = sm_info;
5276351df4b2SJaegeuk Kim sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
5277351df4b2SJaegeuk Kim sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
5278351df4b2SJaegeuk Kim sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
5279351df4b2SJaegeuk Kim sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
5280351df4b2SJaegeuk Kim sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
5281351df4b2SJaegeuk Kim sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
5282351df4b2SJaegeuk Kim sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
528358c41035SJaegeuk Kim sm_info->rec_prefree_segments = sm_info->main_segments *
528458c41035SJaegeuk Kim DEF_RECLAIM_PREFREE_SEGMENTS / 100;
528544a83499SJaegeuk Kim if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
528644a83499SJaegeuk Kim sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
528744a83499SJaegeuk Kim
5288b0332a0fSChao Yu if (!f2fs_lfs_mode(sbi))
5289fdb7ccc3SYangtao Li sm_info->ipu_policy = BIT(F2FS_IPU_FSYNC);
5290216fbd64SJaegeuk Kim sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
5291c1ce1b02SJaegeuk Kim sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
5292f0248ba6SJaegeuk Kim sm_info->min_seq_blocks = BLKS_PER_SEG(sbi);
5293ef095d19SJaegeuk Kim sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
5294a2a12b67SChao Yu sm_info->min_ssr_sections = reserved_sections(sbi);
5295351df4b2SJaegeuk Kim
5296184a5cd2SChao Yu INIT_LIST_HEAD(&sm_info->sit_entry_set);
5297184a5cd2SChao Yu
5298e4544b63STim Murray init_f2fs_rwsem(&sm_info->curseg_lock);
52992b60311dSChao Yu
53004d57b86dSChao Yu err = f2fs_create_flush_cmd_control(sbi);
53012163d198SGu Zheng if (err)
5302a688b9d9SGu Zheng return err;
53036b4afdd7SJaegeuk Kim
53040b54fb84SJaegeuk Kim err = create_discard_cmd_control(sbi);
53050b54fb84SJaegeuk Kim if (err)
53060b54fb84SJaegeuk Kim return err;
53070b54fb84SJaegeuk Kim
5308351df4b2SJaegeuk Kim err = build_sit_info(sbi);
5309351df4b2SJaegeuk Kim if (err)
5310351df4b2SJaegeuk Kim return err;
5311351df4b2SJaegeuk Kim err = build_free_segmap(sbi);
5312351df4b2SJaegeuk Kim if (err)
5313351df4b2SJaegeuk Kim return err;
5314351df4b2SJaegeuk Kim err = build_curseg(sbi);
5315351df4b2SJaegeuk Kim if (err)
5316351df4b2SJaegeuk Kim return err;
5317351df4b2SJaegeuk Kim
5318351df4b2SJaegeuk Kim /* reinit free segmap based on SIT */
5319c39a1b34SJaegeuk Kim err = build_sit_entries(sbi);
5320c39a1b34SJaegeuk Kim if (err)
5321c39a1b34SJaegeuk Kim return err;
5322351df4b2SJaegeuk Kim
5323351df4b2SJaegeuk Kim init_free_segmap(sbi);
5324351df4b2SJaegeuk Kim err = build_dirty_segmap(sbi);
5325351df4b2SJaegeuk Kim if (err)
5326351df4b2SJaegeuk Kim return err;
5327351df4b2SJaegeuk Kim
5328c854f4d6SChao Yu err = sanity_check_curseg(sbi);
5329c854f4d6SChao Yu if (err)
5330c854f4d6SChao Yu return err;
5331c854f4d6SChao Yu
5332351df4b2SJaegeuk Kim init_min_max_mtime(sbi);
5333351df4b2SJaegeuk Kim return 0;
5334351df4b2SJaegeuk Kim }
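/*
 * Error handling note for f2fs_build_segment_manager(): each build step
 * returns its error without unwinding the earlier steps. That is safe
 * because the destroy path below (f2fs_destroy_segment_manager() and its
 * helpers) checks every pointer for NULL before freeing, so the caller
 * can run the full destroy sequence on a partially built segment manager.
 */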
5335351df4b2SJaegeuk Kim
discard_dirty_segmap(struct f2fs_sb_info * sbi,enum dirty_type dirty_type)5336351df4b2SJaegeuk Kim static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
5337351df4b2SJaegeuk Kim enum dirty_type dirty_type)
5338351df4b2SJaegeuk Kim {
5339351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5340351df4b2SJaegeuk Kim
5341351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock);
534239307a8eSJaegeuk Kim kvfree(dirty_i->dirty_segmap[dirty_type]);
5343351df4b2SJaegeuk Kim dirty_i->nr_dirty[dirty_type] = 0;
5344351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock);
5345351df4b2SJaegeuk Kim }
5346351df4b2SJaegeuk Kim
destroy_victim_secmap(struct f2fs_sb_info * sbi)53475ec4e49fSJaegeuk Kim static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
5348351df4b2SJaegeuk Kim {
5349351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
53505f029c04SYi Zhuang
535171419129SChao Yu kvfree(dirty_i->pinned_secmap);
535239307a8eSJaegeuk Kim kvfree(dirty_i->victim_secmap);
5353351df4b2SJaegeuk Kim }
5354351df4b2SJaegeuk Kim
destroy_dirty_segmap(struct f2fs_sb_info * sbi)5355351df4b2SJaegeuk Kim static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
5356351df4b2SJaegeuk Kim {
5357351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5358351df4b2SJaegeuk Kim int i;
5359351df4b2SJaegeuk Kim
5360351df4b2SJaegeuk Kim if (!dirty_i)
5361351df4b2SJaegeuk Kim return;
5362351df4b2SJaegeuk Kim
5363351df4b2SJaegeuk Kim /* discard pre-free/dirty segments list */
5364351df4b2SJaegeuk Kim for (i = 0; i < NR_DIRTY_TYPE; i++)
5365351df4b2SJaegeuk Kim discard_dirty_segmap(sbi, i);
5366351df4b2SJaegeuk Kim
5367da52f8adSJack Qiu if (__is_large_section(sbi)) {
5368da52f8adSJack Qiu mutex_lock(&dirty_i->seglist_lock);
5369da52f8adSJack Qiu kvfree(dirty_i->dirty_secmap);
5370da52f8adSJack Qiu mutex_unlock(&dirty_i->seglist_lock);
5371da52f8adSJack Qiu }
5372da52f8adSJack Qiu
53735ec4e49fSJaegeuk Kim destroy_victim_secmap(sbi);
5374351df4b2SJaegeuk Kim SM_I(sbi)->dirty_info = NULL;
5375c8eb7024SChao Yu kfree(dirty_i);
5376351df4b2SJaegeuk Kim }
5377351df4b2SJaegeuk Kim
destroy_curseg(struct f2fs_sb_info * sbi)5378351df4b2SJaegeuk Kim static void destroy_curseg(struct f2fs_sb_info *sbi)
5379351df4b2SJaegeuk Kim {
5380351df4b2SJaegeuk Kim struct curseg_info *array = SM_I(sbi)->curseg_array;
5381351df4b2SJaegeuk Kim int i;
5382351df4b2SJaegeuk Kim
5383351df4b2SJaegeuk Kim if (!array)
5384351df4b2SJaegeuk Kim return;
5385351df4b2SJaegeuk Kim SM_I(sbi)->curseg_array = NULL;
5386b7ad7512SChao Yu for (i = 0; i < NR_CURSEG_TYPE; i++) {
5387c8eb7024SChao Yu kfree(array[i].sum_blk);
5388c8eb7024SChao Yu kfree(array[i].journal);
5389b7ad7512SChao Yu }
5390c8eb7024SChao Yu kfree(array);
5391351df4b2SJaegeuk Kim }
5392351df4b2SJaegeuk Kim
destroy_free_segmap(struct f2fs_sb_info * sbi)5393351df4b2SJaegeuk Kim static void destroy_free_segmap(struct f2fs_sb_info *sbi)
5394351df4b2SJaegeuk Kim {
5395351df4b2SJaegeuk Kim struct free_segmap_info *free_i = SM_I(sbi)->free_info;
53965f029c04SYi Zhuang
5397351df4b2SJaegeuk Kim if (!free_i)
5398351df4b2SJaegeuk Kim return;
5399351df4b2SJaegeuk Kim SM_I(sbi)->free_info = NULL;
540039307a8eSJaegeuk Kim kvfree(free_i->free_segmap);
540139307a8eSJaegeuk Kim kvfree(free_i->free_secmap);
5402c8eb7024SChao Yu kfree(free_i);
5403351df4b2SJaegeuk Kim }
5404351df4b2SJaegeuk Kim
destroy_sit_info(struct f2fs_sb_info * sbi)5405351df4b2SJaegeuk Kim static void destroy_sit_info(struct f2fs_sb_info *sbi)
5406351df4b2SJaegeuk Kim {
5407351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi);
5408351df4b2SJaegeuk Kim
5409351df4b2SJaegeuk Kim if (!sit_i)
5410351df4b2SJaegeuk Kim return;
5411351df4b2SJaegeuk Kim
54122fde3dd1SChao Yu if (sit_i->sentries)
54132fde3dd1SChao Yu kvfree(sit_i->bitmap);
5414c8eb7024SChao Yu kfree(sit_i->tmp_map);
541560a3b782SJaegeuk Kim
541639307a8eSJaegeuk Kim kvfree(sit_i->sentries);
541739307a8eSJaegeuk Kim kvfree(sit_i->sec_entries);
541839307a8eSJaegeuk Kim kvfree(sit_i->dirty_sentries_bitmap);
5419351df4b2SJaegeuk Kim
5420351df4b2SJaegeuk Kim SM_I(sbi)->sit_info = NULL;
54215222595dSJaegeuk Kim kvfree(sit_i->sit_bitmap);
5422ae27d62eSChao Yu #ifdef CONFIG_F2FS_CHECK_FS
54235222595dSJaegeuk Kim kvfree(sit_i->sit_bitmap_mir);
5424bbf9f7d9SSahitya Tummala kvfree(sit_i->invalid_segmap);
5425ae27d62eSChao Yu #endif
5426c8eb7024SChao Yu kfree(sit_i);
5427351df4b2SJaegeuk Kim }
5428351df4b2SJaegeuk Kim
f2fs_destroy_segment_manager(struct f2fs_sb_info * sbi)54294d57b86dSChao Yu void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
5430351df4b2SJaegeuk Kim {
5431351df4b2SJaegeuk Kim struct f2fs_sm_info *sm_info = SM_I(sbi);
5432a688b9d9SGu Zheng
54333b03f724SChao Yu if (!sm_info)
54343b03f724SChao Yu return;
54354d57b86dSChao Yu f2fs_destroy_flush_cmd_control(sbi, true);
5436f099405fSChao Yu destroy_discard_cmd_control(sbi);
5437351df4b2SJaegeuk Kim destroy_dirty_segmap(sbi);
5438351df4b2SJaegeuk Kim destroy_curseg(sbi);
5439351df4b2SJaegeuk Kim destroy_free_segmap(sbi);
5440351df4b2SJaegeuk Kim destroy_sit_info(sbi);
5441351df4b2SJaegeuk Kim sbi->sm_info = NULL;
5442c8eb7024SChao Yu kfree(sm_info);
5443351df4b2SJaegeuk Kim }
54447fd9e544SJaegeuk Kim
f2fs_create_segment_manager_caches(void)54454d57b86dSChao Yu int __init f2fs_create_segment_manager_caches(void)
54467fd9e544SJaegeuk Kim {
544798510003SChao Yu discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
5448e8512d2eSGu Zheng sizeof(struct discard_entry));
54497fd9e544SJaegeuk Kim if (!discard_entry_slab)
5450184a5cd2SChao Yu goto fail;
5451184a5cd2SChao Yu
545298510003SChao Yu discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
5453b01a9201SJaegeuk Kim sizeof(struct discard_cmd));
5454b01a9201SJaegeuk Kim if (!discard_cmd_slab)
54556ab2a308SChao Yu goto destroy_discard_entry;
5456275b66b0SChao Yu
545798510003SChao Yu sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
5458c9ee0085SChangman Lee sizeof(struct sit_entry_set));
5459184a5cd2SChao Yu if (!sit_entry_set_slab)
5460b01a9201SJaegeuk Kim goto destroy_discard_cmd;
546188b88a66SJaegeuk Kim
54623db1de0eSDaeho Jeong revoke_entry_slab = f2fs_kmem_cache_create("f2fs_revoke_entry",
54633db1de0eSDaeho Jeong sizeof(struct revoke_entry));
54643db1de0eSDaeho Jeong if (!revoke_entry_slab)
546588b88a66SJaegeuk Kim goto destroy_sit_entry_set;
54667fd9e544SJaegeuk Kim return 0;
5467184a5cd2SChao Yu
546888b88a66SJaegeuk Kim destroy_sit_entry_set:
546988b88a66SJaegeuk Kim kmem_cache_destroy(sit_entry_set_slab);
5470b01a9201SJaegeuk Kim destroy_discard_cmd:
5471b01a9201SJaegeuk Kim kmem_cache_destroy(discard_cmd_slab);
54726ab2a308SChao Yu destroy_discard_entry:
5473184a5cd2SChao Yu kmem_cache_destroy(discard_entry_slab);
5474184a5cd2SChao Yu fail:
5475184a5cd2SChao Yu return -ENOMEM;
54767fd9e544SJaegeuk Kim }
54777fd9e544SJaegeuk Kim
f2fs_destroy_segment_manager_caches(void)54784d57b86dSChao Yu void f2fs_destroy_segment_manager_caches(void)
54797fd9e544SJaegeuk Kim {
5480184a5cd2SChao Yu kmem_cache_destroy(sit_entry_set_slab);
5481b01a9201SJaegeuk Kim kmem_cache_destroy(discard_cmd_slab);
54827fd9e544SJaegeuk Kim kmem_cache_destroy(discard_entry_slab);
54833db1de0eSDaeho Jeong kmem_cache_destroy(revoke_entry_slab);
54847fd9e544SJaegeuk Kim }
5485