/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "trace.h"
#include <trace/events/f2fs.h>

/* Find first zero bit in the reversed-bit-order word (see __reverse_ffs). */
#define __reverse_ffz(x) __reverse_ffs(~(x))

/* Slab caches for small, frequently allocated bookkeeping objects. */
static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;

/*
 * Load an unsigned long from @str with its BYTES in reversed order
 * (big-endian byte assembly regardless of host endianness), so that the
 * MSB-first bit layout produced by f2fs_set_bit can be scanned with the
 * word-wide helpers below.  Bit order inside each byte is NOT changed here;
 * the callers compensate with the reversed-bit scan helpers.
 */
static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	/* Accumulate bytes MSB-first; stops after sizeof(long) bytes. */
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 *
 * Returns the position (0-based, counting from the MSB) of the first set
 * bit in @word, via binary narrowing: each step discards the empty half
 * and accounts for the skipped bits in @num.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be integral times of unsigned long.
 * Example:
 *                             MSB <--> LSB
 *   "f2fs_set_bit(0, bitmap)" => 1000 0000
 *   "f2fs_set_bit(7, bitmap)" => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	/* bits remaining from the start of the word containing @offset */
	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		/* fast path: fully-zero word cannot contain a set bit */
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		/* mask off bits before @offset (MSB side in reversed order) */
		tmp &= ~0UL >> offset;
		/* mask off bits past the end of the bitmap in the last word */
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	/* not found: return the original @size as the conventional sentinel */
	return result;
found:
	/* result - size == bit index of the current word's first bit */
	return result - size + __reverse_ffs(tmp);
}

/* Mirror of __find_rev_next_bit that searches for the next ZERO bit. */
static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		/* fast path: fully-set word cannot contain a zero bit */
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		/* force bits before @offset to 1 so they are skipped */
		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		/* force bits past the bitmap end to 1 in the last word */
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}

/*
 * Track @page as an in-memory (atomic-write) page of @inode: tag its
 * page-private state, take an extra page reference, and queue it on the
 * inode's inmem_pages list under inmem_lock.
 */
void register_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *new;

	f2fs_trace_pid(page);

	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
	SetPagePrivate(page);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);

	/* increase reference count with clean state */
	mutex_lock(&fi->inmem_lock);
	get_page(page);
	list_add_tail(&new->list, &fi->inmem_pages);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&fi->inmem_lock);

	trace_f2fs_register_inmem_page(page, INMEM);
}

/*
 * Flush (or, when @abort, drop) all registered in-memory pages of @inode.
 * On the commit path each page is written via do_write_data_page() under
 * f2fs_lock_op(); on the abort path pages are simply invalidated.  In both
 * cases the per-page bookkeeping (private tag, extra ref, list entry,
 * F2FS_INMEM_PAGES count) is undone.  Returns 0 or the first write error;
 * note that on error the remaining list entries are left registered.
 */
int commit_inmem_pages(struct inode *inode, bool abort)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	bool submit_bio = false;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
		.encrypted_page = NULL,
	};
	int err = 0;

	/*
	 * The abort is true only when f2fs_evict_inode is called.
	 * Basically, the f2fs_evict_inode doesn't produce any data writes, so
	 * that we don't need to call f2fs_balance_fs.
	 * Otherwise, f2fs_gc in f2fs_balance_fs can wait forever until this
	 * inode becomes free by iget_locked in f2fs_iget.
	 */
	if (!abort) {
		f2fs_balance_fs(sbi, true);
		f2fs_lock_op(sbi);
	}

	mutex_lock(&fi->inmem_lock);
	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		lock_page(cur->page);
		if (!abort) {
			/* page may have been truncated/remapped meanwhile */
			if (cur->page->mapping == inode->i_mapping) {
				set_page_dirty(cur->page);
				f2fs_wait_on_page_writeback(cur->page, DATA);
				if (clear_page_dirty_for_io(cur->page))
					inode_dec_dirty_pages(inode);
				trace_f2fs_commit_inmem_page(cur->page, INMEM);
				fio.page = cur->page;
				err = do_write_data_page(&fio);
				if (err) {
					/* keep remaining entries queued */
					unlock_page(cur->page);
					break;
				}
				clear_cold_data(cur->page);
				submit_bio = true;
			}
		} else {
			ClearPageUptodate(cur->page);
			trace_f2fs_commit_inmem_page(cur->page, INMEM_DROP);
		}
		/* undo register_inmem_page() state for this page */
		set_page_private(cur->page, 0);
		ClearPagePrivate(cur->page);
		f2fs_put_page(cur->page, 1);

		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	mutex_unlock(&fi->inmem_lock);

	if (!abort) {
		f2fs_unlock_op(sbi);
		if (submit_bio)
			f2fs_submit_merged_bio(sbi, DATA, WRITE);
	}
	return err;
}

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
	if (!need)
		return;
	/*
	 * We should do GC or end up with checkpoint, if there are so many dirty
	 * dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0)) {
		mutex_lock(&sbi->gc_mutex);
		/*
		 * NOTE(review): gc_mutex is not unlocked here — presumably
		 * f2fs_gc() releases it internally; confirm against gc.c.
		 */
		f2fs_gc(sbi, false);
	}
}

/*
 * Background balancing: shrink in-memory caches (extent cache, NAT entries,
 * free nids) when memory is tight, and trigger a checkpoint when cached
 * entries can only be reclaimed that way or the checkpoint interval expired.
 */
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* try to shrink extent cache when there is no enough memory */
	if (!available_free_memory(sbi, EXTENT_CACHE))
		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!available_free_memory(sbi, NAT_ENTRIES))
		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!available_free_memory(sbi, FREE_NIDS))
		try_to_free_nids(sbi, NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES);

	/* checkpoint is the only way to shrink partial cached entries */
	if (!available_free_memory(sbi, NAT_ENTRIES) ||
			excess_prefree_segs(sbi) ||
			!available_free_memory(sbi, INO_ENTRIES) ||
			jiffies > sbi->cp_expires) {
		if (test_opt(sbi, DATA_FLUSH))
			sync_dirty_inodes(sbi, FILE_INODE);
		f2fs_sync_fs(sbi->sb, true);
	}
}

/*
 * Kthread that merges queued flush commands: it drains the lock-free
 * issue_list, submits a single WRITE_FLUSH bio for the whole batch, and
 * fans the bio's result out to every waiter via its completion.
 */
static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&fcc->issue_list)) {
		struct bio *bio;
		struct flush_cmd *cmd, *next;
		int ret;

		bio = f2fs_bio_alloc(0);

		/* llist_del_all returns LIFO order; restore FIFO */
		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		bio->bi_bdev = sbi->sb->s_bdev;
		ret = submit_bio_wait(WRITE_FLUSH, bio);

		/* one flush result satisfies every batched waiter */
		llist_for_each_entry_safe(cmd, next,
				fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		bio_put(bio);
		fcc->dispatch_list = NULL;
	}

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}

/*
 * Issue a device cache flush.  With FLUSH_MERGE the request is queued for
 * the flush kthread and the caller sleeps on a completion; otherwise a
 * WRITE_FLUSH bio is submitted synchronously.  NOBARRIER skips the flush
 * entirely.  Returns 0 on success or the bio submission error.
 */
int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
	struct flush_cmd cmd;

	trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER),
					test_opt(sbi, FLUSH_MERGE));

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE)) {
		struct bio *bio = f2fs_bio_alloc(0);
		int ret;

		bio->bi_bdev = sbi->sb->s_bdev;
		ret = submit_bio_wait(WRITE_FLUSH, bio);
		bio_put(bio);
		return ret;
	}

	init_completion(&cmd.wait);

	llist_add(&cmd.llnode, &fcc->issue_list);

	/* wake the kthread only when no batch is currently in flight */
	if (!fcc->dispatch_list)
		wake_up(&fcc->flush_wait_queue);

	wait_for_completion(&cmd.wait);

	return cmd.ret;
}

/*
 * Allocate the flush-merge control structure and start its kthread.
 * Returns 0 on success or a negative errno (allocation/kthread failure);
 * on failure cmd_control_info is reset to NULL.
 */
int create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->cmd_control_info = fcc;
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kfree(fcc);
		SM_I(sbi)->cmd_control_info = NULL;
		return err;
	}

	return err;
}

/* Stop the flush kthread (if running) and free the control structure. */
void destroy_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;

	if (fcc && fcc->f2fs_issue_flush)
		kthread_stop(fcc->f2fs_issue_flush);
	kfree(fcc);
	SM_I(sbi)->cmd_control_info = NULL;
}

/*
 * Mark @segno dirty of the given @dirty_type, updating nr_dirty counters.
 * For DIRTY, the segment is additionally tracked under its per-type
 * (hot/warm/cold data/node) dirty bitmap.  Caller holds seglist_lock.
 */
static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		/* a segment's type must index a per-type dirty bitmap */
		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

/*
 * Clear @segno from the @dirty_type bitmap (and, for DIRTY, from its
 * per-type bitmap).  When the whole section becomes block-free, also drop
 * its GC victim candidacy.  Caller holds seglist_lock.
 */
static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		/* section has no valid blocks left: not a GC victim anymore */
		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
			clear_bit(GET_SECNO(sbi, segno),
						dirty_i->victim_secmap);
	}
}

/*
 * Should not occur error such as -ENOMEM.
 * Adding dirty entry into seglist is not critical operation.
 * If a given segment is one of current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, 0);

	if (valid_blocks == 0) {
		/* fully invalid: prefree, reclaimable after checkpoint */
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

/*
 * Issue a block-device discard for @blklen blocks starting at @blkstart,
 * updating each covered segment's discard_map and the global counter of
 * candidate discard blocks first.  Returns blkdev_issue_discard()'s result.
 */
static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = SECTOR_FROM_BLOCK(blkstart);
	sector_t len = SECTOR_FROM_BLOCK(blklen);
	struct seg_entry *se;
	unsigned int offset;
	block_t i;

	for (i = blkstart; i < blkstart + blklen; i++) {
		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
		offset = GET_BLKOFF_FROM_SEG0(sbi, i);

		/* block is now discarded: no longer a pending candidate */
		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	}
	trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
	return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
}

/*
 * Try to discard the single block at @blkaddr (used for the block next to
 * a dnode).  Returns true when the caller must instead zero the block on
 * disk via update_meta_page() — i.e. discard was unsupported, disabled, or
 * failed; returns false when the discard succeeded or was already done.
 */
bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int err = -ENOTSUPP;

	if (test_opt(sbi, DISCARD)) {
		struct seg_entry *se = get_seg_entry(sbi,
				GET_SEGNO(sbi, blkaddr));
		unsigned int offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		/* already discarded: nothing to do */
		if (f2fs_test_bit(offset, se->discard_map))
			return false;

		err = f2fs_issue_discard(sbi, blkaddr, 1);
	}

	if (err) {
		/* fall back to writing a zeroed page at blkaddr */
		update_meta_page(sbi, NULL, blkaddr);
		return true;
	}
	return false;
}

/*
 * Queue the extent [start, end) of segment @se (within cpc->trim_start) on
 * the pending discard list, coalescing with the previous entry when the
 * ranges are block-contiguous.  Also accounts it in nr_discards.
 */
static void __add_discard_entry(struct f2fs_sb_info *sbi,
		struct cp_control *cpc, struct seg_entry *se,
		unsigned int start, unsigned int end)
{
	struct list_head *head = &SM_I(sbi)->discard_list;
	struct discard_entry *new, *last;

	if (!list_empty(head)) {
		last = list_last_entry(head, struct discard_entry, list);
		if (START_BLOCK(sbi, cpc->trim_start) + start ==
						last->blkaddr + last->len) {
			/* adjacent to the tail entry: just extend it */
			last->len += end - start;
			goto done;
		}
	}

	new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
	INIT_LIST_HEAD(&new->list);
	new->blkaddr = START_BLOCK(sbi, cpc->trim_start) + start;
	new->len = end - start;
	list_add_tail(&new->list, head);
done:
	SM_I(sbi)->nr_discards += end - start;
}

/*
 * Scan the segment at cpc->trim_start for discardable block runs and queue
 * them via __add_discard_entry().  Normal checkpoints discard blocks freed
 * since the last checkpoint (valid in ckpt map, no longer in cur map);
 * CP_DISCARD (FITRIM) additionally forces every unused, not-yet-discarded
 * block, ignoring the nr_discards cap.
 */
static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	int max_blocks = sbi->blocks_per_seg;
	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *discard_map = (unsigned long *)se->discard_map;
	unsigned long *dmap = SIT_I(sbi)->tmp_map;
	unsigned int start = 0, end = -1;
	bool force = (cpc->reason == CP_DISCARD);
	int i;

	/* fully valid segment: nothing can be discarded */
	if (se->valid_blocks == max_blocks)
		return;

	if (!force) {
		if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
		    SM_I(sbi)->nr_discards >= SM_I(sbi)->max_discards)
			return;
	}

	/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
	for (i = 0; i < entries; i++)
		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

	while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
		if (start >= max_blocks)
			break;

		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
		__add_discard_entry(sbi, cpc, se, start, end);
	}
}

/* Drop every queued discard entry without issuing it (cache teardown). */
void release_discard_addrs(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->discard_list);
	struct discard_entry *entry, *this;

	/* drop caches */
	list_for_each_entry_safe(entry, this, head, list) {
		list_del(&entry->list);
		kmem_cache_free(discard_entry_slab, entry);
	}
}

/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
		__set_test_and_free(sbi, segno);
	mutex_unlock(&dirty_i->seglist_lock);
}

/*
 * After a checkpoint, turn prefree segments into free ones: clear the PRE
 * bitmap run-by-run, issue a single large discard per run (when DISCARD is
 * enabled), then issue the queued small discards (honoring the FITRIM
 * minimum length for CP_DISCARD) and account trimmed blocks in @cpc.
 */
void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct list_head *head = &(SM_I(sbi)->discard_list);
	struct discard_entry *entry, *this;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int start = 0, end = -1;

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;
		/* find the next contiguous run of prefree segments */
		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
		if (start >= MAIN_SEGS(sbi))
			break;
		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
								start + 1);

		for (i = start; i < end; i++)
			clear_bit(i, prefree_map);

		dirty_i->nr_dirty[PRE] -= end - start;

		if (!test_opt(sbi, DISCARD))
			continue;

		/* one discard for the whole run of segments */
		f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
				(end - start) << sbi->log_blocks_per_seg);
	}
	mutex_unlock(&dirty_i->seglist_lock);

	/* send small discards */
	list_for_each_entry_safe(entry, this, head, list) {
		if (cpc->reason == CP_DISCARD && entry->len < cpc->trim_minlen)
			goto skip;
		f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
		cpc->trimmed += entry->len;
skip:
		list_del(&entry->list);
		SM_I(sbi)->nr_discards -= entry->len;
		kmem_cache_free(discard_entry_slab, entry);
	}
}

/*
 * Mark @segno's SIT entry dirty.  Returns true when it was already dirty,
 * false when this call newly dirtied it (and bumped dirty_sentries).
 */
static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);

	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
		sit_i->dirty_sentries++;
		return false;
	}

	return true;
}

/* Set the segment's type, optionally dirtying its SIT entry. */
static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);
	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

/*
 * Apply a valid-block delta (@del is +1 or -1) for @blkaddr: update the
 * segment's valid count, mtime, valid/discard bitmaps, checkpoint-valid
 * count, and the per-section totals.  Caller holds sentry_lock.
 */
static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	/* count must stay within [0, blocks_per_seg] */
	f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		if (f2fs_test_and_set_bit(offset, se->cur_valid_map))
			f2fs_bug_on(sbi, 1);
		/* a newly valid block is no longer a discard candidate */
		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	} else {
		if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map))
			f2fs_bug_on(sbi, 1);
		if (f2fs_test_and_clear_bit(offset, se->discard_map))
			sbi->discard_blks++;
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}

/*
 * Move one block's validity from @old to @new (block relocation): @new is
 * validated, @old invalidated (unless unset), and both segments' dirty
 * status is re-evaluated.
 */
void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
{
	update_sit_entry(sbi, new, 1);
	if (GET_SEGNO(sbi, old) != NULL_SEGNO)
		update_sit_entry(sbi, old, -1);

	locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
	locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
}

/*
 * Invalidate the block at @addr: drop its validity in the SIT and mark its
 * segment dirty.  NEW_ADDR is a no-op; NULL_ADDR is a bug.
 */
void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	f2fs_bug_on(sbi, addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	mutex_lock(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	mutex_unlock(&sit_i->sentry_lock);
}

/*
 * Tell whether @blkaddr was already valid at the last checkpoint.
 * (Definition continues beyond this chunk.)
 */
bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno, offset;
	struct seg_entry *se;
	bool is_cp = false;

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
		return true;

	mutex_lock(&sit_i->sentry_lock);

	segno = GET_SEGNO(sbi,
blkaddr); 7636e2c64adSJaegeuk Kim se = get_seg_entry(sbi, segno); 7646e2c64adSJaegeuk Kim offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); 7656e2c64adSJaegeuk Kim 7666e2c64adSJaegeuk Kim if (f2fs_test_bit(offset, se->ckpt_valid_map)) 7676e2c64adSJaegeuk Kim is_cp = true; 7686e2c64adSJaegeuk Kim 7696e2c64adSJaegeuk Kim mutex_unlock(&sit_i->sentry_lock); 7706e2c64adSJaegeuk Kim 7716e2c64adSJaegeuk Kim return is_cp; 7726e2c64adSJaegeuk Kim } 7736e2c64adSJaegeuk Kim 7740a8165d7SJaegeuk Kim /* 775351df4b2SJaegeuk Kim * This function should be resided under the curseg_mutex lock 776351df4b2SJaegeuk Kim */ 777351df4b2SJaegeuk Kim static void __add_sum_entry(struct f2fs_sb_info *sbi, int type, 778e79efe3bSHaicheng Li struct f2fs_summary *sum) 779351df4b2SJaegeuk Kim { 780351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 781351df4b2SJaegeuk Kim void *addr = curseg->sum_blk; 782e79efe3bSHaicheng Li addr += curseg->next_blkoff * sizeof(struct f2fs_summary); 783351df4b2SJaegeuk Kim memcpy(addr, sum, sizeof(struct f2fs_summary)); 784351df4b2SJaegeuk Kim } 785351df4b2SJaegeuk Kim 7860a8165d7SJaegeuk Kim /* 787351df4b2SJaegeuk Kim * Calculate the number of current summary pages for writing 788351df4b2SJaegeuk Kim */ 7893fa06d7bSChao Yu int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra) 790351df4b2SJaegeuk Kim { 791351df4b2SJaegeuk Kim int valid_sum_count = 0; 7929a47938bSFan Li int i, sum_in_page; 793351df4b2SJaegeuk Kim 794351df4b2SJaegeuk Kim for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 795351df4b2SJaegeuk Kim if (sbi->ckpt->alloc_type[i] == SSR) 796351df4b2SJaegeuk Kim valid_sum_count += sbi->blocks_per_seg; 7973fa06d7bSChao Yu else { 7983fa06d7bSChao Yu if (for_ra) 7993fa06d7bSChao Yu valid_sum_count += le16_to_cpu( 8003fa06d7bSChao Yu F2FS_CKPT(sbi)->cur_data_blkoff[i]); 801351df4b2SJaegeuk Kim else 802351df4b2SJaegeuk Kim valid_sum_count += curseg_blkoff(sbi, i); 803351df4b2SJaegeuk Kim } 8043fa06d7bSChao Yu } 805351df4b2SJaegeuk 
Kim 8069a47938bSFan Li sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE - 8079a47938bSFan Li SUM_FOOTER_SIZE) / SUMMARY_SIZE; 8089a47938bSFan Li if (valid_sum_count <= sum_in_page) 809351df4b2SJaegeuk Kim return 1; 8109a47938bSFan Li else if ((valid_sum_count - sum_in_page) <= 8119a47938bSFan Li (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE) 812351df4b2SJaegeuk Kim return 2; 813351df4b2SJaegeuk Kim return 3; 814351df4b2SJaegeuk Kim } 815351df4b2SJaegeuk Kim 8160a8165d7SJaegeuk Kim /* 817351df4b2SJaegeuk Kim * Caller should put this summary page 818351df4b2SJaegeuk Kim */ 819351df4b2SJaegeuk Kim struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) 820351df4b2SJaegeuk Kim { 821351df4b2SJaegeuk Kim return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno)); 822351df4b2SJaegeuk Kim } 823351df4b2SJaegeuk Kim 824381722d2SChao Yu void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr) 825381722d2SChao Yu { 826381722d2SChao Yu struct page *page = grab_meta_page(sbi, blk_addr); 827381722d2SChao Yu void *dst = page_address(page); 828381722d2SChao Yu 829381722d2SChao Yu if (src) 830381722d2SChao Yu memcpy(dst, src, PAGE_CACHE_SIZE); 831381722d2SChao Yu else 832381722d2SChao Yu memset(dst, 0, PAGE_CACHE_SIZE); 833381722d2SChao Yu set_page_dirty(page); 834381722d2SChao Yu f2fs_put_page(page, 1); 835381722d2SChao Yu } 836381722d2SChao Yu 837351df4b2SJaegeuk Kim static void write_sum_page(struct f2fs_sb_info *sbi, 838351df4b2SJaegeuk Kim struct f2fs_summary_block *sum_blk, block_t blk_addr) 839351df4b2SJaegeuk Kim { 840381722d2SChao Yu update_meta_page(sbi, (void *)sum_blk, blk_addr); 841351df4b2SJaegeuk Kim } 842351df4b2SJaegeuk Kim 84360374688SJaegeuk Kim static int is_next_segment_free(struct f2fs_sb_info *sbi, int type) 84460374688SJaegeuk Kim { 84560374688SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 84681fb5e87SHaicheng Li unsigned int segno = curseg->segno + 1; 84760374688SJaegeuk Kim struct free_segmap_info 
*free_i = FREE_I(sbi); 84860374688SJaegeuk Kim 8497cd8558bSJaegeuk Kim if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec) 85081fb5e87SHaicheng Li return !test_bit(segno, free_i->free_segmap); 85160374688SJaegeuk Kim return 0; 85260374688SJaegeuk Kim } 85360374688SJaegeuk Kim 8540a8165d7SJaegeuk Kim /* 855351df4b2SJaegeuk Kim * Find a new segment from the free segments bitmap to right order 856351df4b2SJaegeuk Kim * This function should be returned with success, otherwise BUG 857351df4b2SJaegeuk Kim */ 858351df4b2SJaegeuk Kim static void get_new_segment(struct f2fs_sb_info *sbi, 859351df4b2SJaegeuk Kim unsigned int *newseg, bool new_sec, int dir) 860351df4b2SJaegeuk Kim { 861351df4b2SJaegeuk Kim struct free_segmap_info *free_i = FREE_I(sbi); 862351df4b2SJaegeuk Kim unsigned int segno, secno, zoneno; 8637cd8558bSJaegeuk Kim unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone; 864351df4b2SJaegeuk Kim unsigned int hint = *newseg / sbi->segs_per_sec; 865351df4b2SJaegeuk Kim unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg); 866351df4b2SJaegeuk Kim unsigned int left_start = hint; 867351df4b2SJaegeuk Kim bool init = true; 868351df4b2SJaegeuk Kim int go_left = 0; 869351df4b2SJaegeuk Kim int i; 870351df4b2SJaegeuk Kim 8711a118ccfSChao Yu spin_lock(&free_i->segmap_lock); 872351df4b2SJaegeuk Kim 873351df4b2SJaegeuk Kim if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) { 874351df4b2SJaegeuk Kim segno = find_next_zero_bit(free_i->free_segmap, 8757cd8558bSJaegeuk Kim MAIN_SEGS(sbi), *newseg + 1); 87633afa7fdSJaegeuk Kim if (segno - *newseg < sbi->segs_per_sec - 87733afa7fdSJaegeuk Kim (*newseg % sbi->segs_per_sec)) 878351df4b2SJaegeuk Kim goto got_it; 879351df4b2SJaegeuk Kim } 880351df4b2SJaegeuk Kim find_other_zone: 8817cd8558bSJaegeuk Kim secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint); 8827cd8558bSJaegeuk Kim if (secno >= MAIN_SECS(sbi)) { 883351df4b2SJaegeuk Kim if (dir == ALLOC_RIGHT) { 884351df4b2SJaegeuk Kim secno = 
find_next_zero_bit(free_i->free_secmap, 8857cd8558bSJaegeuk Kim MAIN_SECS(sbi), 0); 8867cd8558bSJaegeuk Kim f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi)); 887351df4b2SJaegeuk Kim } else { 888351df4b2SJaegeuk Kim go_left = 1; 889351df4b2SJaegeuk Kim left_start = hint - 1; 890351df4b2SJaegeuk Kim } 891351df4b2SJaegeuk Kim } 892351df4b2SJaegeuk Kim if (go_left == 0) 893351df4b2SJaegeuk Kim goto skip_left; 894351df4b2SJaegeuk Kim 895351df4b2SJaegeuk Kim while (test_bit(left_start, free_i->free_secmap)) { 896351df4b2SJaegeuk Kim if (left_start > 0) { 897351df4b2SJaegeuk Kim left_start--; 898351df4b2SJaegeuk Kim continue; 899351df4b2SJaegeuk Kim } 900351df4b2SJaegeuk Kim left_start = find_next_zero_bit(free_i->free_secmap, 9017cd8558bSJaegeuk Kim MAIN_SECS(sbi), 0); 9027cd8558bSJaegeuk Kim f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi)); 903351df4b2SJaegeuk Kim break; 904351df4b2SJaegeuk Kim } 905351df4b2SJaegeuk Kim secno = left_start; 906351df4b2SJaegeuk Kim skip_left: 907351df4b2SJaegeuk Kim hint = secno; 908351df4b2SJaegeuk Kim segno = secno * sbi->segs_per_sec; 909351df4b2SJaegeuk Kim zoneno = secno / sbi->secs_per_zone; 910351df4b2SJaegeuk Kim 911351df4b2SJaegeuk Kim /* give up on finding another zone */ 912351df4b2SJaegeuk Kim if (!init) 913351df4b2SJaegeuk Kim goto got_it; 914351df4b2SJaegeuk Kim if (sbi->secs_per_zone == 1) 915351df4b2SJaegeuk Kim goto got_it; 916351df4b2SJaegeuk Kim if (zoneno == old_zoneno) 917351df4b2SJaegeuk Kim goto got_it; 918351df4b2SJaegeuk Kim if (dir == ALLOC_LEFT) { 919351df4b2SJaegeuk Kim if (!go_left && zoneno + 1 >= total_zones) 920351df4b2SJaegeuk Kim goto got_it; 921351df4b2SJaegeuk Kim if (go_left && zoneno == 0) 922351df4b2SJaegeuk Kim goto got_it; 923351df4b2SJaegeuk Kim } 924351df4b2SJaegeuk Kim for (i = 0; i < NR_CURSEG_TYPE; i++) 925351df4b2SJaegeuk Kim if (CURSEG_I(sbi, i)->zone == zoneno) 926351df4b2SJaegeuk Kim break; 927351df4b2SJaegeuk Kim 928351df4b2SJaegeuk Kim if (i < NR_CURSEG_TYPE) { 929351df4b2SJaegeuk Kim /* zone is 
in user, try another */ 930351df4b2SJaegeuk Kim if (go_left) 931351df4b2SJaegeuk Kim hint = zoneno * sbi->secs_per_zone - 1; 932351df4b2SJaegeuk Kim else if (zoneno + 1 >= total_zones) 933351df4b2SJaegeuk Kim hint = 0; 934351df4b2SJaegeuk Kim else 935351df4b2SJaegeuk Kim hint = (zoneno + 1) * sbi->secs_per_zone; 936351df4b2SJaegeuk Kim init = false; 937351df4b2SJaegeuk Kim goto find_other_zone; 938351df4b2SJaegeuk Kim } 939351df4b2SJaegeuk Kim got_it: 940351df4b2SJaegeuk Kim /* set it as dirty segment in free segmap */ 9419850cf4aSJaegeuk Kim f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap)); 942351df4b2SJaegeuk Kim __set_inuse(sbi, segno); 943351df4b2SJaegeuk Kim *newseg = segno; 9441a118ccfSChao Yu spin_unlock(&free_i->segmap_lock); 945351df4b2SJaegeuk Kim } 946351df4b2SJaegeuk Kim 947351df4b2SJaegeuk Kim static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified) 948351df4b2SJaegeuk Kim { 949351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 950351df4b2SJaegeuk Kim struct summary_footer *sum_footer; 951351df4b2SJaegeuk Kim 952351df4b2SJaegeuk Kim curseg->segno = curseg->next_segno; 953351df4b2SJaegeuk Kim curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno); 954351df4b2SJaegeuk Kim curseg->next_blkoff = 0; 955351df4b2SJaegeuk Kim curseg->next_segno = NULL_SEGNO; 956351df4b2SJaegeuk Kim 957351df4b2SJaegeuk Kim sum_footer = &(curseg->sum_blk->footer); 958351df4b2SJaegeuk Kim memset(sum_footer, 0, sizeof(struct summary_footer)); 959351df4b2SJaegeuk Kim if (IS_DATASEG(type)) 960351df4b2SJaegeuk Kim SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA); 961351df4b2SJaegeuk Kim if (IS_NODESEG(type)) 962351df4b2SJaegeuk Kim SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE); 963351df4b2SJaegeuk Kim __set_sit_entry_type(sbi, type, curseg->segno, modified); 964351df4b2SJaegeuk Kim } 965351df4b2SJaegeuk Kim 9660a8165d7SJaegeuk Kim /* 967351df4b2SJaegeuk Kim * Allocate a current working segment. 
968351df4b2SJaegeuk Kim * This function always allocates a free segment in LFS manner. 969351df4b2SJaegeuk Kim */ 970351df4b2SJaegeuk Kim static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec) 971351df4b2SJaegeuk Kim { 972351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 973351df4b2SJaegeuk Kim unsigned int segno = curseg->segno; 974351df4b2SJaegeuk Kim int dir = ALLOC_LEFT; 975351df4b2SJaegeuk Kim 976351df4b2SJaegeuk Kim write_sum_page(sbi, curseg->sum_blk, 97781fb5e87SHaicheng Li GET_SUM_BLOCK(sbi, segno)); 978351df4b2SJaegeuk Kim if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA) 979351df4b2SJaegeuk Kim dir = ALLOC_RIGHT; 980351df4b2SJaegeuk Kim 981351df4b2SJaegeuk Kim if (test_opt(sbi, NOHEAP)) 982351df4b2SJaegeuk Kim dir = ALLOC_RIGHT; 983351df4b2SJaegeuk Kim 984351df4b2SJaegeuk Kim get_new_segment(sbi, &segno, new_sec, dir); 985351df4b2SJaegeuk Kim curseg->next_segno = segno; 986351df4b2SJaegeuk Kim reset_curseg(sbi, type, 1); 987351df4b2SJaegeuk Kim curseg->alloc_type = LFS; 988351df4b2SJaegeuk Kim } 989351df4b2SJaegeuk Kim 990351df4b2SJaegeuk Kim static void __next_free_blkoff(struct f2fs_sb_info *sbi, 991351df4b2SJaegeuk Kim struct curseg_info *seg, block_t start) 992351df4b2SJaegeuk Kim { 993351df4b2SJaegeuk Kim struct seg_entry *se = get_seg_entry(sbi, seg->segno); 994e81c93cfSChangman Lee int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); 99560a3b782SJaegeuk Kim unsigned long *target_map = SIT_I(sbi)->tmp_map; 996e81c93cfSChangman Lee unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; 997e81c93cfSChangman Lee unsigned long *cur_map = (unsigned long *)se->cur_valid_map; 998e81c93cfSChangman Lee int i, pos; 999e81c93cfSChangman Lee 1000e81c93cfSChangman Lee for (i = 0; i < entries; i++) 1001e81c93cfSChangman Lee target_map[i] = ckpt_map[i] | cur_map[i]; 1002e81c93cfSChangman Lee 1003e81c93cfSChangman Lee pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start); 
1004e81c93cfSChangman Lee 1005e81c93cfSChangman Lee seg->next_blkoff = pos; 1006351df4b2SJaegeuk Kim } 1007351df4b2SJaegeuk Kim 10080a8165d7SJaegeuk Kim /* 1009351df4b2SJaegeuk Kim * If a segment is written by LFS manner, next block offset is just obtained 1010351df4b2SJaegeuk Kim * by increasing the current block offset. However, if a segment is written by 1011351df4b2SJaegeuk Kim * SSR manner, next block offset obtained by calling __next_free_blkoff 1012351df4b2SJaegeuk Kim */ 1013351df4b2SJaegeuk Kim static void __refresh_next_blkoff(struct f2fs_sb_info *sbi, 1014351df4b2SJaegeuk Kim struct curseg_info *seg) 1015351df4b2SJaegeuk Kim { 1016351df4b2SJaegeuk Kim if (seg->alloc_type == SSR) 1017351df4b2SJaegeuk Kim __next_free_blkoff(sbi, seg, seg->next_blkoff + 1); 1018351df4b2SJaegeuk Kim else 1019351df4b2SJaegeuk Kim seg->next_blkoff++; 1020351df4b2SJaegeuk Kim } 1021351df4b2SJaegeuk Kim 10220a8165d7SJaegeuk Kim /* 1023351df4b2SJaegeuk Kim * This function always allocates a used segment(from dirty seglist) by SSR 1024351df4b2SJaegeuk Kim * manner, so it should recover the existing segment information of valid blocks 1025351df4b2SJaegeuk Kim */ 1026351df4b2SJaegeuk Kim static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse) 1027351df4b2SJaegeuk Kim { 1028351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 1029351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 1030351df4b2SJaegeuk Kim unsigned int new_segno = curseg->next_segno; 1031351df4b2SJaegeuk Kim struct f2fs_summary_block *sum_node; 1032351df4b2SJaegeuk Kim struct page *sum_page; 1033351df4b2SJaegeuk Kim 1034351df4b2SJaegeuk Kim write_sum_page(sbi, curseg->sum_blk, 1035351df4b2SJaegeuk Kim GET_SUM_BLOCK(sbi, curseg->segno)); 1036351df4b2SJaegeuk Kim __set_test_and_inuse(sbi, new_segno); 1037351df4b2SJaegeuk Kim 1038351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock); 1039351df4b2SJaegeuk Kim __remove_dirty_segment(sbi, new_segno, PRE); 
1040351df4b2SJaegeuk Kim __remove_dirty_segment(sbi, new_segno, DIRTY); 1041351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock); 1042351df4b2SJaegeuk Kim 1043351df4b2SJaegeuk Kim reset_curseg(sbi, type, 1); 1044351df4b2SJaegeuk Kim curseg->alloc_type = SSR; 1045351df4b2SJaegeuk Kim __next_free_blkoff(sbi, curseg, 0); 1046351df4b2SJaegeuk Kim 1047351df4b2SJaegeuk Kim if (reuse) { 1048351df4b2SJaegeuk Kim sum_page = get_sum_page(sbi, new_segno); 1049351df4b2SJaegeuk Kim sum_node = (struct f2fs_summary_block *)page_address(sum_page); 1050351df4b2SJaegeuk Kim memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE); 1051351df4b2SJaegeuk Kim f2fs_put_page(sum_page, 1); 1052351df4b2SJaegeuk Kim } 1053351df4b2SJaegeuk Kim } 1054351df4b2SJaegeuk Kim 105543727527SJaegeuk Kim static int get_ssr_segment(struct f2fs_sb_info *sbi, int type) 105643727527SJaegeuk Kim { 105743727527SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 105843727527SJaegeuk Kim const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops; 105943727527SJaegeuk Kim 106043727527SJaegeuk Kim if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0)) 106143727527SJaegeuk Kim return v_ops->get_victim(sbi, 106243727527SJaegeuk Kim &(curseg)->next_segno, BG_GC, type, SSR); 106343727527SJaegeuk Kim 106443727527SJaegeuk Kim /* For data segments, let's do SSR more intensively */ 106543727527SJaegeuk Kim for (; type >= CURSEG_HOT_DATA; type--) 106643727527SJaegeuk Kim if (v_ops->get_victim(sbi, &(curseg)->next_segno, 106743727527SJaegeuk Kim BG_GC, type, SSR)) 106843727527SJaegeuk Kim return 1; 106943727527SJaegeuk Kim return 0; 107043727527SJaegeuk Kim } 107143727527SJaegeuk Kim 1072351df4b2SJaegeuk Kim /* 1073351df4b2SJaegeuk Kim * flush out current segment and replace it with new segment 1074351df4b2SJaegeuk Kim * This function should be returned with success, otherwise BUG 1075351df4b2SJaegeuk Kim */ 1076351df4b2SJaegeuk Kim static void allocate_segment_by_default(struct f2fs_sb_info *sbi, 
1077351df4b2SJaegeuk Kim int type, bool force) 1078351df4b2SJaegeuk Kim { 1079351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 1080351df4b2SJaegeuk Kim 10817b405275SGu Zheng if (force) 1082351df4b2SJaegeuk Kim new_curseg(sbi, type, true); 10837b405275SGu Zheng else if (type == CURSEG_WARM_NODE) 1084351df4b2SJaegeuk Kim new_curseg(sbi, type, false); 108560374688SJaegeuk Kim else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type)) 108660374688SJaegeuk Kim new_curseg(sbi, type, false); 1087351df4b2SJaegeuk Kim else if (need_SSR(sbi) && get_ssr_segment(sbi, type)) 1088351df4b2SJaegeuk Kim change_curseg(sbi, type, true); 1089351df4b2SJaegeuk Kim else 1090351df4b2SJaegeuk Kim new_curseg(sbi, type, false); 1091dcdfff65SJaegeuk Kim 1092dcdfff65SJaegeuk Kim stat_inc_seg_type(sbi, curseg); 1093351df4b2SJaegeuk Kim } 1094351df4b2SJaegeuk Kim 109538aa0889SJaegeuk Kim static void __allocate_new_segments(struct f2fs_sb_info *sbi, int type) 109638aa0889SJaegeuk Kim { 109738aa0889SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 109838aa0889SJaegeuk Kim unsigned int old_segno; 109938aa0889SJaegeuk Kim 110038aa0889SJaegeuk Kim old_segno = curseg->segno; 110138aa0889SJaegeuk Kim SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true); 110238aa0889SJaegeuk Kim locate_dirty_segment(sbi, old_segno); 110338aa0889SJaegeuk Kim } 110438aa0889SJaegeuk Kim 1105351df4b2SJaegeuk Kim void allocate_new_segments(struct f2fs_sb_info *sbi) 1106351df4b2SJaegeuk Kim { 1107351df4b2SJaegeuk Kim int i; 1108351df4b2SJaegeuk Kim 110938aa0889SJaegeuk Kim for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) 111038aa0889SJaegeuk Kim __allocate_new_segments(sbi, i); 1111351df4b2SJaegeuk Kim } 1112351df4b2SJaegeuk Kim 1113351df4b2SJaegeuk Kim static const struct segment_allocation default_salloc_ops = { 1114351df4b2SJaegeuk Kim .allocate_segment = allocate_segment_by_default, 1115351df4b2SJaegeuk Kim }; 1116351df4b2SJaegeuk Kim 11174b2fecc8SJaegeuk Kim int 
f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) 11184b2fecc8SJaegeuk Kim { 1119f7ef9b83SJaegeuk Kim __u64 start = F2FS_BYTES_TO_BLK(range->start); 1120f7ef9b83SJaegeuk Kim __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1; 11214b2fecc8SJaegeuk Kim unsigned int start_segno, end_segno; 11224b2fecc8SJaegeuk Kim struct cp_control cpc; 1123c34f42e2SChao Yu int err = 0; 11244b2fecc8SJaegeuk Kim 1125836b5a63SJaegeuk Kim if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize) 11264b2fecc8SJaegeuk Kim return -EINVAL; 11274b2fecc8SJaegeuk Kim 11289bd27ae4SJan Kara cpc.trimmed = 0; 11297cd8558bSJaegeuk Kim if (end <= MAIN_BLKADDR(sbi)) 11304b2fecc8SJaegeuk Kim goto out; 11314b2fecc8SJaegeuk Kim 11324b2fecc8SJaegeuk Kim /* start/end segment number in main_area */ 11337cd8558bSJaegeuk Kim start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start); 11347cd8558bSJaegeuk Kim end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 : 11357cd8558bSJaegeuk Kim GET_SEGNO(sbi, end); 11364b2fecc8SJaegeuk Kim cpc.reason = CP_DISCARD; 1137836b5a63SJaegeuk Kim cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen)); 11384b2fecc8SJaegeuk Kim 11394b2fecc8SJaegeuk Kim /* do checkpoint to issue discard commands safely */ 1140bba681cbSJaegeuk Kim for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) { 1141bba681cbSJaegeuk Kim cpc.trim_start = start_segno; 1142a66cdd98SJaegeuk Kim 1143a66cdd98SJaegeuk Kim if (sbi->discard_blks == 0) 1144a66cdd98SJaegeuk Kim break; 1145a66cdd98SJaegeuk Kim else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi)) 1146a66cdd98SJaegeuk Kim cpc.trim_end = end_segno; 1147a66cdd98SJaegeuk Kim else 1148a66cdd98SJaegeuk Kim cpc.trim_end = min_t(unsigned int, 1149a66cdd98SJaegeuk Kim rounddown(start_segno + 1150bba681cbSJaegeuk Kim BATCHED_TRIM_SEGMENTS(sbi), 1151bba681cbSJaegeuk Kim sbi->segs_per_sec) - 1, end_segno); 1152bba681cbSJaegeuk Kim 1153ca4b02eeSJaegeuk Kim mutex_lock(&sbi->gc_mutex); 
1154c34f42e2SChao Yu err = write_checkpoint(sbi, &cpc); 1155ca4b02eeSJaegeuk Kim mutex_unlock(&sbi->gc_mutex); 1156bba681cbSJaegeuk Kim } 11574b2fecc8SJaegeuk Kim out: 1158f7ef9b83SJaegeuk Kim range->len = F2FS_BLK_TO_BYTES(cpc.trimmed); 1159c34f42e2SChao Yu return err; 11604b2fecc8SJaegeuk Kim } 11614b2fecc8SJaegeuk Kim 1162351df4b2SJaegeuk Kim static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type) 1163351df4b2SJaegeuk Kim { 1164351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 1165351df4b2SJaegeuk Kim if (curseg->next_blkoff < sbi->blocks_per_seg) 1166351df4b2SJaegeuk Kim return true; 1167351df4b2SJaegeuk Kim return false; 1168351df4b2SJaegeuk Kim } 1169351df4b2SJaegeuk Kim 1170351df4b2SJaegeuk Kim static int __get_segment_type_2(struct page *page, enum page_type p_type) 1171351df4b2SJaegeuk Kim { 1172351df4b2SJaegeuk Kim if (p_type == DATA) 1173351df4b2SJaegeuk Kim return CURSEG_HOT_DATA; 1174351df4b2SJaegeuk Kim else 1175351df4b2SJaegeuk Kim return CURSEG_HOT_NODE; 1176351df4b2SJaegeuk Kim } 1177351df4b2SJaegeuk Kim 1178351df4b2SJaegeuk Kim static int __get_segment_type_4(struct page *page, enum page_type p_type) 1179351df4b2SJaegeuk Kim { 1180351df4b2SJaegeuk Kim if (p_type == DATA) { 1181351df4b2SJaegeuk Kim struct inode *inode = page->mapping->host; 1182351df4b2SJaegeuk Kim 1183351df4b2SJaegeuk Kim if (S_ISDIR(inode->i_mode)) 1184351df4b2SJaegeuk Kim return CURSEG_HOT_DATA; 1185351df4b2SJaegeuk Kim else 1186351df4b2SJaegeuk Kim return CURSEG_COLD_DATA; 1187351df4b2SJaegeuk Kim } else { 1188a344b9fdSJaegeuk Kim if (IS_DNODE(page) && is_cold_node(page)) 1189a344b9fdSJaegeuk Kim return CURSEG_WARM_NODE; 1190351df4b2SJaegeuk Kim else 1191351df4b2SJaegeuk Kim return CURSEG_COLD_NODE; 1192351df4b2SJaegeuk Kim } 1193351df4b2SJaegeuk Kim } 1194351df4b2SJaegeuk Kim 1195351df4b2SJaegeuk Kim static int __get_segment_type_6(struct page *page, enum page_type p_type) 1196351df4b2SJaegeuk Kim { 1197351df4b2SJaegeuk Kim if (p_type == DATA) { 
1198351df4b2SJaegeuk Kim struct inode *inode = page->mapping->host; 1199351df4b2SJaegeuk Kim 1200351df4b2SJaegeuk Kim if (S_ISDIR(inode->i_mode)) 1201351df4b2SJaegeuk Kim return CURSEG_HOT_DATA; 1202354a3399SJaegeuk Kim else if (is_cold_data(page) || file_is_cold(inode)) 1203351df4b2SJaegeuk Kim return CURSEG_COLD_DATA; 1204351df4b2SJaegeuk Kim else 1205351df4b2SJaegeuk Kim return CURSEG_WARM_DATA; 1206351df4b2SJaegeuk Kim } else { 1207351df4b2SJaegeuk Kim if (IS_DNODE(page)) 1208351df4b2SJaegeuk Kim return is_cold_node(page) ? CURSEG_WARM_NODE : 1209351df4b2SJaegeuk Kim CURSEG_HOT_NODE; 1210351df4b2SJaegeuk Kim else 1211351df4b2SJaegeuk Kim return CURSEG_COLD_NODE; 1212351df4b2SJaegeuk Kim } 1213351df4b2SJaegeuk Kim } 1214351df4b2SJaegeuk Kim 1215351df4b2SJaegeuk Kim static int __get_segment_type(struct page *page, enum page_type p_type) 1216351df4b2SJaegeuk Kim { 12174081363fSJaegeuk Kim switch (F2FS_P_SB(page)->active_logs) { 1218351df4b2SJaegeuk Kim case 2: 1219351df4b2SJaegeuk Kim return __get_segment_type_2(page, p_type); 1220351df4b2SJaegeuk Kim case 4: 1221351df4b2SJaegeuk Kim return __get_segment_type_4(page, p_type); 1222351df4b2SJaegeuk Kim } 122312a67146SJaegeuk Kim /* NR_CURSEG_TYPE(6) logs by default */ 12249850cf4aSJaegeuk Kim f2fs_bug_on(F2FS_P_SB(page), 12259850cf4aSJaegeuk Kim F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE); 122612a67146SJaegeuk Kim return __get_segment_type_6(page, p_type); 1227351df4b2SJaegeuk Kim } 1228351df4b2SJaegeuk Kim 1229bfad7c2dSJaegeuk Kim void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, 1230351df4b2SJaegeuk Kim block_t old_blkaddr, block_t *new_blkaddr, 1231bfad7c2dSJaegeuk Kim struct f2fs_summary *sum, int type) 1232351df4b2SJaegeuk Kim { 1233351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 1234351df4b2SJaegeuk Kim struct curseg_info *curseg; 123538aa0889SJaegeuk Kim bool direct_io = (type == CURSEG_DIRECT_IO); 123638aa0889SJaegeuk Kim 123738aa0889SJaegeuk Kim type = direct_io ? 
CURSEG_WARM_DATA : type; 1238351df4b2SJaegeuk Kim 1239351df4b2SJaegeuk Kim curseg = CURSEG_I(sbi, type); 1240351df4b2SJaegeuk Kim 1241351df4b2SJaegeuk Kim mutex_lock(&curseg->curseg_mutex); 124221cb1d99SJaegeuk Kim mutex_lock(&sit_i->sentry_lock); 1243351df4b2SJaegeuk Kim 124438aa0889SJaegeuk Kim /* direct_io'ed data is aligned to the segment for better performance */ 124547e70ca4SJaegeuk Kim if (direct_io && curseg->next_blkoff && 124647e70ca4SJaegeuk Kim !has_not_enough_free_secs(sbi, 0)) 124738aa0889SJaegeuk Kim __allocate_new_segments(sbi, type); 124838aa0889SJaegeuk Kim 1249351df4b2SJaegeuk Kim *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); 1250351df4b2SJaegeuk Kim 1251351df4b2SJaegeuk Kim /* 1252351df4b2SJaegeuk Kim * __add_sum_entry should be resided under the curseg_mutex 1253351df4b2SJaegeuk Kim * because, this function updates a summary entry in the 1254351df4b2SJaegeuk Kim * current summary block. 1255351df4b2SJaegeuk Kim */ 1256e79efe3bSHaicheng Li __add_sum_entry(sbi, type, sum); 1257351df4b2SJaegeuk Kim 1258351df4b2SJaegeuk Kim __refresh_next_blkoff(sbi, curseg); 1259dcdfff65SJaegeuk Kim 1260dcdfff65SJaegeuk Kim stat_inc_block_count(sbi, curseg); 1261351df4b2SJaegeuk Kim 12625e443818SJaegeuk Kim if (!__has_curseg_space(sbi, type)) 12635e443818SJaegeuk Kim sit_i->s_ops->allocate_segment(sbi, type, false); 1264351df4b2SJaegeuk Kim /* 1265351df4b2SJaegeuk Kim * SIT information should be updated before segment allocation, 1266351df4b2SJaegeuk Kim * since SSR needs latest valid block information. 
1267351df4b2SJaegeuk Kim */ 1268351df4b2SJaegeuk Kim refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr); 12695e443818SJaegeuk Kim 1270351df4b2SJaegeuk Kim mutex_unlock(&sit_i->sentry_lock); 1271351df4b2SJaegeuk Kim 1272bfad7c2dSJaegeuk Kim if (page && IS_NODESEG(type)) 1273351df4b2SJaegeuk Kim fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg)); 1274351df4b2SJaegeuk Kim 1275bfad7c2dSJaegeuk Kim mutex_unlock(&curseg->curseg_mutex); 1276bfad7c2dSJaegeuk Kim } 1277bfad7c2dSJaegeuk Kim 127805ca3632SJaegeuk Kim static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio) 1279bfad7c2dSJaegeuk Kim { 128005ca3632SJaegeuk Kim int type = __get_segment_type(fio->page, fio->type); 1281bfad7c2dSJaegeuk Kim 128205ca3632SJaegeuk Kim allocate_data_block(fio->sbi, fio->page, fio->blk_addr, 128305ca3632SJaegeuk Kim &fio->blk_addr, sum, type); 1284bfad7c2dSJaegeuk Kim 1285351df4b2SJaegeuk Kim /* writeout dirty page into bdev */ 128605ca3632SJaegeuk Kim f2fs_submit_page_mbio(fio); 1287351df4b2SJaegeuk Kim } 1288351df4b2SJaegeuk Kim 1289577e3495SJaegeuk Kim void write_meta_page(struct f2fs_sb_info *sbi, struct page *page) 1290351df4b2SJaegeuk Kim { 1291458e6197SJaegeuk Kim struct f2fs_io_info fio = { 129205ca3632SJaegeuk Kim .sbi = sbi, 1293458e6197SJaegeuk Kim .type = META, 1294cf04e8ebSJaegeuk Kim .rw = WRITE_SYNC | REQ_META | REQ_PRIO, 1295cf04e8ebSJaegeuk Kim .blk_addr = page->index, 129605ca3632SJaegeuk Kim .page = page, 12974375a336SJaegeuk Kim .encrypted_page = NULL, 1298458e6197SJaegeuk Kim }; 1299458e6197SJaegeuk Kim 13002b947003SChao Yu if (unlikely(page->index >= MAIN_BLKADDR(sbi))) 13012b947003SChao Yu fio.rw &= ~REQ_META; 13022b947003SChao Yu 1303351df4b2SJaegeuk Kim set_page_writeback(page); 130405ca3632SJaegeuk Kim f2fs_submit_page_mbio(&fio); 1305351df4b2SJaegeuk Kim } 1306351df4b2SJaegeuk Kim 130705ca3632SJaegeuk Kim void write_node_page(unsigned int nid, struct f2fs_io_info *fio) 1308351df4b2SJaegeuk Kim { 1309351df4b2SJaegeuk Kim struct 
f2fs_summary sum; 131005ca3632SJaegeuk Kim 1311351df4b2SJaegeuk Kim set_summary(&sum, nid, 0, 0); 131205ca3632SJaegeuk Kim do_write_page(&sum, fio); 1313351df4b2SJaegeuk Kim } 1314351df4b2SJaegeuk Kim 131505ca3632SJaegeuk Kim void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio) 1316351df4b2SJaegeuk Kim { 131705ca3632SJaegeuk Kim struct f2fs_sb_info *sbi = fio->sbi; 1318351df4b2SJaegeuk Kim struct f2fs_summary sum; 1319351df4b2SJaegeuk Kim struct node_info ni; 1320351df4b2SJaegeuk Kim 13219850cf4aSJaegeuk Kim f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR); 1322351df4b2SJaegeuk Kim get_node_info(sbi, dn->nid, &ni); 1323351df4b2SJaegeuk Kim set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); 132405ca3632SJaegeuk Kim do_write_page(&sum, fio); 1325e1509cf2SJaegeuk Kim dn->data_blkaddr = fio->blk_addr; 1326351df4b2SJaegeuk Kim } 1327351df4b2SJaegeuk Kim 132805ca3632SJaegeuk Kim void rewrite_data_page(struct f2fs_io_info *fio) 1329351df4b2SJaegeuk Kim { 133005ca3632SJaegeuk Kim stat_inc_inplace_blocks(fio->sbi); 133105ca3632SJaegeuk Kim f2fs_submit_page_mbio(fio); 1332351df4b2SJaegeuk Kim } 1333351df4b2SJaegeuk Kim 1334528e3459SChao Yu static void __f2fs_replace_block(struct f2fs_sb_info *sbi, 1335528e3459SChao Yu struct f2fs_summary *sum, 133619f106bcSChao Yu block_t old_blkaddr, block_t new_blkaddr, 133719f106bcSChao Yu bool recover_curseg) 1338351df4b2SJaegeuk Kim { 1339351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 1340351df4b2SJaegeuk Kim struct curseg_info *curseg; 1341351df4b2SJaegeuk Kim unsigned int segno, old_cursegno; 1342351df4b2SJaegeuk Kim struct seg_entry *se; 1343351df4b2SJaegeuk Kim int type; 134419f106bcSChao Yu unsigned short old_blkoff; 1345351df4b2SJaegeuk Kim 1346351df4b2SJaegeuk Kim segno = GET_SEGNO(sbi, new_blkaddr); 1347351df4b2SJaegeuk Kim se = get_seg_entry(sbi, segno); 1348351df4b2SJaegeuk Kim type = se->type; 1349351df4b2SJaegeuk Kim 135019f106bcSChao Yu if (!recover_curseg) { 135119f106bcSChao Yu /* for 
recovery flow */ 1352351df4b2SJaegeuk Kim if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) { 1353351df4b2SJaegeuk Kim if (old_blkaddr == NULL_ADDR) 1354351df4b2SJaegeuk Kim type = CURSEG_COLD_DATA; 1355351df4b2SJaegeuk Kim else 1356351df4b2SJaegeuk Kim type = CURSEG_WARM_DATA; 1357351df4b2SJaegeuk Kim } 135819f106bcSChao Yu } else { 135919f106bcSChao Yu if (!IS_CURSEG(sbi, segno)) 136019f106bcSChao Yu type = CURSEG_WARM_DATA; 136119f106bcSChao Yu } 136219f106bcSChao Yu 1363351df4b2SJaegeuk Kim curseg = CURSEG_I(sbi, type); 1364351df4b2SJaegeuk Kim 1365351df4b2SJaegeuk Kim mutex_lock(&curseg->curseg_mutex); 1366351df4b2SJaegeuk Kim mutex_lock(&sit_i->sentry_lock); 1367351df4b2SJaegeuk Kim 1368351df4b2SJaegeuk Kim old_cursegno = curseg->segno; 136919f106bcSChao Yu old_blkoff = curseg->next_blkoff; 1370351df4b2SJaegeuk Kim 1371351df4b2SJaegeuk Kim /* change the current segment */ 1372351df4b2SJaegeuk Kim if (segno != curseg->segno) { 1373351df4b2SJaegeuk Kim curseg->next_segno = segno; 1374351df4b2SJaegeuk Kim change_curseg(sbi, type, true); 1375351df4b2SJaegeuk Kim } 1376351df4b2SJaegeuk Kim 1377491c0854SJaegeuk Kim curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr); 1378e79efe3bSHaicheng Li __add_sum_entry(sbi, type, sum); 1379351df4b2SJaegeuk Kim 13806e2c64adSJaegeuk Kim if (!recover_curseg) 13816e2c64adSJaegeuk Kim update_sit_entry(sbi, new_blkaddr, 1); 13826e2c64adSJaegeuk Kim if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) 13836e2c64adSJaegeuk Kim update_sit_entry(sbi, old_blkaddr, -1); 13846e2c64adSJaegeuk Kim 13856e2c64adSJaegeuk Kim locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); 13866e2c64adSJaegeuk Kim locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr)); 13876e2c64adSJaegeuk Kim 1388351df4b2SJaegeuk Kim locate_dirty_segment(sbi, old_cursegno); 1389351df4b2SJaegeuk Kim 139019f106bcSChao Yu if (recover_curseg) { 139119f106bcSChao Yu if (old_cursegno != curseg->segno) { 139219f106bcSChao Yu curseg->next_segno = old_cursegno; 
139319f106bcSChao Yu change_curseg(sbi, type, true); 139419f106bcSChao Yu } 139519f106bcSChao Yu curseg->next_blkoff = old_blkoff; 139619f106bcSChao Yu } 139719f106bcSChao Yu 1398351df4b2SJaegeuk Kim mutex_unlock(&sit_i->sentry_lock); 1399351df4b2SJaegeuk Kim mutex_unlock(&curseg->curseg_mutex); 1400351df4b2SJaegeuk Kim } 1401351df4b2SJaegeuk Kim 1402528e3459SChao Yu void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, 1403528e3459SChao Yu block_t old_addr, block_t new_addr, 1404528e3459SChao Yu unsigned char version, bool recover_curseg) 1405528e3459SChao Yu { 1406528e3459SChao Yu struct f2fs_summary sum; 1407528e3459SChao Yu 1408528e3459SChao Yu set_summary(&sum, dn->nid, dn->ofs_in_node, version); 1409528e3459SChao Yu 1410528e3459SChao Yu __f2fs_replace_block(sbi, &sum, old_addr, new_addr, recover_curseg); 1411528e3459SChao Yu 1412528e3459SChao Yu dn->data_blkaddr = new_addr; 1413528e3459SChao Yu set_data_blkaddr(dn); 1414528e3459SChao Yu f2fs_update_extent_cache(dn); 1415528e3459SChao Yu } 1416528e3459SChao Yu 1417df0f8dc0SChao Yu static inline bool is_merged_page(struct f2fs_sb_info *sbi, 1418df0f8dc0SChao Yu struct page *page, enum page_type type) 1419df0f8dc0SChao Yu { 1420df0f8dc0SChao Yu enum page_type btype = PAGE_TYPE_OF_BIO(type); 1421df0f8dc0SChao Yu struct f2fs_bio_info *io = &sbi->write_io[btype]; 1422df0f8dc0SChao Yu struct bio_vec *bvec; 14234375a336SJaegeuk Kim struct page *target; 1424df0f8dc0SChao Yu int i; 1425df0f8dc0SChao Yu 1426df0f8dc0SChao Yu down_read(&io->io_rwsem); 14274375a336SJaegeuk Kim if (!io->bio) { 14284375a336SJaegeuk Kim up_read(&io->io_rwsem); 14294375a336SJaegeuk Kim return false; 14304375a336SJaegeuk Kim } 1431df0f8dc0SChao Yu 1432ce23447fSJaegeuk Kim bio_for_each_segment_all(bvec, io->bio, i) { 14334375a336SJaegeuk Kim 14344375a336SJaegeuk Kim if (bvec->bv_page->mapping) { 14354375a336SJaegeuk Kim target = bvec->bv_page; 14364375a336SJaegeuk Kim } else { 14374375a336SJaegeuk Kim struct f2fs_crypto_ctx 
*ctx; 14384375a336SJaegeuk Kim 14394375a336SJaegeuk Kim /* encrypted page */ 14404375a336SJaegeuk Kim ctx = (struct f2fs_crypto_ctx *)page_private( 14414375a336SJaegeuk Kim bvec->bv_page); 1442ca40b030SJaegeuk Kim target = ctx->w.control_page; 14434375a336SJaegeuk Kim } 14444375a336SJaegeuk Kim 14454375a336SJaegeuk Kim if (page == target) { 1446df0f8dc0SChao Yu up_read(&io->io_rwsem); 1447df0f8dc0SChao Yu return true; 1448df0f8dc0SChao Yu } 1449df0f8dc0SChao Yu } 1450df0f8dc0SChao Yu 1451df0f8dc0SChao Yu up_read(&io->io_rwsem); 1452df0f8dc0SChao Yu return false; 1453df0f8dc0SChao Yu } 1454df0f8dc0SChao Yu 145593dfe2acSJaegeuk Kim void f2fs_wait_on_page_writeback(struct page *page, 14565514f0aaSYuan Zhong enum page_type type) 145793dfe2acSJaegeuk Kim { 145893dfe2acSJaegeuk Kim if (PageWriteback(page)) { 14594081363fSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_P_SB(page); 14604081363fSJaegeuk Kim 1461df0f8dc0SChao Yu if (is_merged_page(sbi, page, type)) 1462458e6197SJaegeuk Kim f2fs_submit_merged_bio(sbi, type, WRITE); 146393dfe2acSJaegeuk Kim wait_on_page_writeback(page); 146493dfe2acSJaegeuk Kim } 146593dfe2acSJaegeuk Kim } 146693dfe2acSJaegeuk Kim 146708b39fbdSChao Yu void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi, 146808b39fbdSChao Yu block_t blkaddr) 146908b39fbdSChao Yu { 147008b39fbdSChao Yu struct page *cpage; 147108b39fbdSChao Yu 147208b39fbdSChao Yu if (blkaddr == NEW_ADDR) 147308b39fbdSChao Yu return; 147408b39fbdSChao Yu 147508b39fbdSChao Yu f2fs_bug_on(sbi, blkaddr == NULL_ADDR); 147608b39fbdSChao Yu 147708b39fbdSChao Yu cpage = find_lock_page(META_MAPPING(sbi), blkaddr); 147808b39fbdSChao Yu if (cpage) { 147908b39fbdSChao Yu f2fs_wait_on_page_writeback(cpage, DATA); 148008b39fbdSChao Yu f2fs_put_page(cpage, 1); 148108b39fbdSChao Yu } 148208b39fbdSChao Yu } 148308b39fbdSChao Yu 1484351df4b2SJaegeuk Kim static int read_compacted_summaries(struct f2fs_sb_info *sbi) 1485351df4b2SJaegeuk Kim { 1486351df4b2SJaegeuk Kim struct 
f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 1487351df4b2SJaegeuk Kim struct curseg_info *seg_i; 1488351df4b2SJaegeuk Kim unsigned char *kaddr; 1489351df4b2SJaegeuk Kim struct page *page; 1490351df4b2SJaegeuk Kim block_t start; 1491351df4b2SJaegeuk Kim int i, j, offset; 1492351df4b2SJaegeuk Kim 1493351df4b2SJaegeuk Kim start = start_sum_block(sbi); 1494351df4b2SJaegeuk Kim 1495351df4b2SJaegeuk Kim page = get_meta_page(sbi, start++); 1496351df4b2SJaegeuk Kim kaddr = (unsigned char *)page_address(page); 1497351df4b2SJaegeuk Kim 1498351df4b2SJaegeuk Kim /* Step 1: restore nat cache */ 1499351df4b2SJaegeuk Kim seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); 1500351df4b2SJaegeuk Kim memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE); 1501351df4b2SJaegeuk Kim 1502351df4b2SJaegeuk Kim /* Step 2: restore sit cache */ 1503351df4b2SJaegeuk Kim seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); 1504351df4b2SJaegeuk Kim memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE, 1505351df4b2SJaegeuk Kim SUM_JOURNAL_SIZE); 1506351df4b2SJaegeuk Kim offset = 2 * SUM_JOURNAL_SIZE; 1507351df4b2SJaegeuk Kim 1508351df4b2SJaegeuk Kim /* Step 3: restore summary entries */ 1509351df4b2SJaegeuk Kim for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 1510351df4b2SJaegeuk Kim unsigned short blk_off; 1511351df4b2SJaegeuk Kim unsigned int segno; 1512351df4b2SJaegeuk Kim 1513351df4b2SJaegeuk Kim seg_i = CURSEG_I(sbi, i); 1514351df4b2SJaegeuk Kim segno = le32_to_cpu(ckpt->cur_data_segno[i]); 1515351df4b2SJaegeuk Kim blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]); 1516351df4b2SJaegeuk Kim seg_i->next_segno = segno; 1517351df4b2SJaegeuk Kim reset_curseg(sbi, i, 0); 1518351df4b2SJaegeuk Kim seg_i->alloc_type = ckpt->alloc_type[i]; 1519351df4b2SJaegeuk Kim seg_i->next_blkoff = blk_off; 1520351df4b2SJaegeuk Kim 1521351df4b2SJaegeuk Kim if (seg_i->alloc_type == SSR) 1522351df4b2SJaegeuk Kim blk_off = sbi->blocks_per_seg; 1523351df4b2SJaegeuk Kim 1524351df4b2SJaegeuk Kim for (j = 0; j < blk_off; j++) { 
1525351df4b2SJaegeuk Kim struct f2fs_summary *s; 1526351df4b2SJaegeuk Kim s = (struct f2fs_summary *)(kaddr + offset); 1527351df4b2SJaegeuk Kim seg_i->sum_blk->entries[j] = *s; 1528351df4b2SJaegeuk Kim offset += SUMMARY_SIZE; 1529351df4b2SJaegeuk Kim if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE - 1530351df4b2SJaegeuk Kim SUM_FOOTER_SIZE) 1531351df4b2SJaegeuk Kim continue; 1532351df4b2SJaegeuk Kim 1533351df4b2SJaegeuk Kim f2fs_put_page(page, 1); 1534351df4b2SJaegeuk Kim page = NULL; 1535351df4b2SJaegeuk Kim 1536351df4b2SJaegeuk Kim page = get_meta_page(sbi, start++); 1537351df4b2SJaegeuk Kim kaddr = (unsigned char *)page_address(page); 1538351df4b2SJaegeuk Kim offset = 0; 1539351df4b2SJaegeuk Kim } 1540351df4b2SJaegeuk Kim } 1541351df4b2SJaegeuk Kim f2fs_put_page(page, 1); 1542351df4b2SJaegeuk Kim return 0; 1543351df4b2SJaegeuk Kim } 1544351df4b2SJaegeuk Kim 1545351df4b2SJaegeuk Kim static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) 1546351df4b2SJaegeuk Kim { 1547351df4b2SJaegeuk Kim struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 1548351df4b2SJaegeuk Kim struct f2fs_summary_block *sum; 1549351df4b2SJaegeuk Kim struct curseg_info *curseg; 1550351df4b2SJaegeuk Kim struct page *new; 1551351df4b2SJaegeuk Kim unsigned short blk_off; 1552351df4b2SJaegeuk Kim unsigned int segno = 0; 1553351df4b2SJaegeuk Kim block_t blk_addr = 0; 1554351df4b2SJaegeuk Kim 1555351df4b2SJaegeuk Kim /* get segment number and block addr */ 1556351df4b2SJaegeuk Kim if (IS_DATASEG(type)) { 1557351df4b2SJaegeuk Kim segno = le32_to_cpu(ckpt->cur_data_segno[type]); 1558351df4b2SJaegeuk Kim blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type - 1559351df4b2SJaegeuk Kim CURSEG_HOT_DATA]); 1560119ee914SJaegeuk Kim if (__exist_node_summaries(sbi)) 1561351df4b2SJaegeuk Kim blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type); 1562351df4b2SJaegeuk Kim else 1563351df4b2SJaegeuk Kim blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type); 1564351df4b2SJaegeuk Kim } else { 1565351df4b2SJaegeuk Kim 
segno = le32_to_cpu(ckpt->cur_node_segno[type - 1566351df4b2SJaegeuk Kim CURSEG_HOT_NODE]); 1567351df4b2SJaegeuk Kim blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type - 1568351df4b2SJaegeuk Kim CURSEG_HOT_NODE]); 1569119ee914SJaegeuk Kim if (__exist_node_summaries(sbi)) 1570351df4b2SJaegeuk Kim blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE, 1571351df4b2SJaegeuk Kim type - CURSEG_HOT_NODE); 1572351df4b2SJaegeuk Kim else 1573351df4b2SJaegeuk Kim blk_addr = GET_SUM_BLOCK(sbi, segno); 1574351df4b2SJaegeuk Kim } 1575351df4b2SJaegeuk Kim 1576351df4b2SJaegeuk Kim new = get_meta_page(sbi, blk_addr); 1577351df4b2SJaegeuk Kim sum = (struct f2fs_summary_block *)page_address(new); 1578351df4b2SJaegeuk Kim 1579351df4b2SJaegeuk Kim if (IS_NODESEG(type)) { 1580119ee914SJaegeuk Kim if (__exist_node_summaries(sbi)) { 1581351df4b2SJaegeuk Kim struct f2fs_summary *ns = &sum->entries[0]; 1582351df4b2SJaegeuk Kim int i; 1583351df4b2SJaegeuk Kim for (i = 0; i < sbi->blocks_per_seg; i++, ns++) { 1584351df4b2SJaegeuk Kim ns->version = 0; 1585351df4b2SJaegeuk Kim ns->ofs_in_node = 0; 1586351df4b2SJaegeuk Kim } 1587351df4b2SJaegeuk Kim } else { 1588d653788aSGu Zheng int err; 1589d653788aSGu Zheng 1590d653788aSGu Zheng err = restore_node_summary(sbi, segno, sum); 1591d653788aSGu Zheng if (err) { 1592351df4b2SJaegeuk Kim f2fs_put_page(new, 1); 1593d653788aSGu Zheng return err; 1594351df4b2SJaegeuk Kim } 1595351df4b2SJaegeuk Kim } 1596351df4b2SJaegeuk Kim } 1597351df4b2SJaegeuk Kim 1598351df4b2SJaegeuk Kim /* set uncompleted segment to curseg */ 1599351df4b2SJaegeuk Kim curseg = CURSEG_I(sbi, type); 1600351df4b2SJaegeuk Kim mutex_lock(&curseg->curseg_mutex); 1601351df4b2SJaegeuk Kim memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE); 1602351df4b2SJaegeuk Kim curseg->next_segno = segno; 1603351df4b2SJaegeuk Kim reset_curseg(sbi, type, 0); 1604351df4b2SJaegeuk Kim curseg->alloc_type = ckpt->alloc_type[type]; 1605351df4b2SJaegeuk Kim curseg->next_blkoff = blk_off; 1606351df4b2SJaegeuk Kim 
mutex_unlock(&curseg->curseg_mutex); 1607351df4b2SJaegeuk Kim f2fs_put_page(new, 1); 1608351df4b2SJaegeuk Kim return 0; 1609351df4b2SJaegeuk Kim } 1610351df4b2SJaegeuk Kim 1611351df4b2SJaegeuk Kim static int restore_curseg_summaries(struct f2fs_sb_info *sbi) 1612351df4b2SJaegeuk Kim { 1613351df4b2SJaegeuk Kim int type = CURSEG_HOT_DATA; 1614e4fc5fbfSChao Yu int err; 1615351df4b2SJaegeuk Kim 161625ca923bSJaegeuk Kim if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) { 16173fa06d7bSChao Yu int npages = npages_for_summary_flush(sbi, true); 16183fa06d7bSChao Yu 16193fa06d7bSChao Yu if (npages >= 2) 16203fa06d7bSChao Yu ra_meta_pages(sbi, start_sum_block(sbi), npages, 162126879fb1SChao Yu META_CP, true); 16223fa06d7bSChao Yu 1623351df4b2SJaegeuk Kim /* restore for compacted data summary */ 1624351df4b2SJaegeuk Kim if (read_compacted_summaries(sbi)) 1625351df4b2SJaegeuk Kim return -EINVAL; 1626351df4b2SJaegeuk Kim type = CURSEG_HOT_NODE; 1627351df4b2SJaegeuk Kim } 1628351df4b2SJaegeuk Kim 1629119ee914SJaegeuk Kim if (__exist_node_summaries(sbi)) 16303fa06d7bSChao Yu ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type), 163126879fb1SChao Yu NR_CURSEG_TYPE - type, META_CP, true); 16323fa06d7bSChao Yu 1633e4fc5fbfSChao Yu for (; type <= CURSEG_COLD_NODE; type++) { 1634e4fc5fbfSChao Yu err = read_normal_summaries(sbi, type); 1635e4fc5fbfSChao Yu if (err) 1636e4fc5fbfSChao Yu return err; 1637e4fc5fbfSChao Yu } 1638e4fc5fbfSChao Yu 1639351df4b2SJaegeuk Kim return 0; 1640351df4b2SJaegeuk Kim } 1641351df4b2SJaegeuk Kim 1642351df4b2SJaegeuk Kim static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) 1643351df4b2SJaegeuk Kim { 1644351df4b2SJaegeuk Kim struct page *page; 1645351df4b2SJaegeuk Kim unsigned char *kaddr; 1646351df4b2SJaegeuk Kim struct f2fs_summary *summary; 1647351df4b2SJaegeuk Kim struct curseg_info *seg_i; 1648351df4b2SJaegeuk Kim int written_size = 0; 1649351df4b2SJaegeuk Kim int i, j; 1650351df4b2SJaegeuk Kim 
1651351df4b2SJaegeuk Kim page = grab_meta_page(sbi, blkaddr++); 1652351df4b2SJaegeuk Kim kaddr = (unsigned char *)page_address(page); 1653351df4b2SJaegeuk Kim 1654351df4b2SJaegeuk Kim /* Step 1: write nat cache */ 1655351df4b2SJaegeuk Kim seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); 1656351df4b2SJaegeuk Kim memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE); 1657351df4b2SJaegeuk Kim written_size += SUM_JOURNAL_SIZE; 1658351df4b2SJaegeuk Kim 1659351df4b2SJaegeuk Kim /* Step 2: write sit cache */ 1660351df4b2SJaegeuk Kim seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); 1661351df4b2SJaegeuk Kim memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits, 1662351df4b2SJaegeuk Kim SUM_JOURNAL_SIZE); 1663351df4b2SJaegeuk Kim written_size += SUM_JOURNAL_SIZE; 1664351df4b2SJaegeuk Kim 1665351df4b2SJaegeuk Kim /* Step 3: write summary entries */ 1666351df4b2SJaegeuk Kim for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 1667351df4b2SJaegeuk Kim unsigned short blkoff; 1668351df4b2SJaegeuk Kim seg_i = CURSEG_I(sbi, i); 1669351df4b2SJaegeuk Kim if (sbi->ckpt->alloc_type[i] == SSR) 1670351df4b2SJaegeuk Kim blkoff = sbi->blocks_per_seg; 1671351df4b2SJaegeuk Kim else 1672351df4b2SJaegeuk Kim blkoff = curseg_blkoff(sbi, i); 1673351df4b2SJaegeuk Kim 1674351df4b2SJaegeuk Kim for (j = 0; j < blkoff; j++) { 1675351df4b2SJaegeuk Kim if (!page) { 1676351df4b2SJaegeuk Kim page = grab_meta_page(sbi, blkaddr++); 1677351df4b2SJaegeuk Kim kaddr = (unsigned char *)page_address(page); 1678351df4b2SJaegeuk Kim written_size = 0; 1679351df4b2SJaegeuk Kim } 1680351df4b2SJaegeuk Kim summary = (struct f2fs_summary *)(kaddr + written_size); 1681351df4b2SJaegeuk Kim *summary = seg_i->sum_blk->entries[j]; 1682351df4b2SJaegeuk Kim written_size += SUMMARY_SIZE; 1683351df4b2SJaegeuk Kim 1684351df4b2SJaegeuk Kim if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE - 1685351df4b2SJaegeuk Kim SUM_FOOTER_SIZE) 1686351df4b2SJaegeuk Kim continue; 1687351df4b2SJaegeuk Kim 1688e8d61a74SChao Yu set_page_dirty(page); 
1689351df4b2SJaegeuk Kim f2fs_put_page(page, 1); 1690351df4b2SJaegeuk Kim page = NULL; 1691351df4b2SJaegeuk Kim } 1692351df4b2SJaegeuk Kim } 1693e8d61a74SChao Yu if (page) { 1694e8d61a74SChao Yu set_page_dirty(page); 1695351df4b2SJaegeuk Kim f2fs_put_page(page, 1); 1696351df4b2SJaegeuk Kim } 1697e8d61a74SChao Yu } 1698351df4b2SJaegeuk Kim 1699351df4b2SJaegeuk Kim static void write_normal_summaries(struct f2fs_sb_info *sbi, 1700351df4b2SJaegeuk Kim block_t blkaddr, int type) 1701351df4b2SJaegeuk Kim { 1702351df4b2SJaegeuk Kim int i, end; 1703351df4b2SJaegeuk Kim if (IS_DATASEG(type)) 1704351df4b2SJaegeuk Kim end = type + NR_CURSEG_DATA_TYPE; 1705351df4b2SJaegeuk Kim else 1706351df4b2SJaegeuk Kim end = type + NR_CURSEG_NODE_TYPE; 1707351df4b2SJaegeuk Kim 1708351df4b2SJaegeuk Kim for (i = type; i < end; i++) { 1709351df4b2SJaegeuk Kim struct curseg_info *sum = CURSEG_I(sbi, i); 1710351df4b2SJaegeuk Kim mutex_lock(&sum->curseg_mutex); 1711351df4b2SJaegeuk Kim write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type)); 1712351df4b2SJaegeuk Kim mutex_unlock(&sum->curseg_mutex); 1713351df4b2SJaegeuk Kim } 1714351df4b2SJaegeuk Kim } 1715351df4b2SJaegeuk Kim 1716351df4b2SJaegeuk Kim void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk) 1717351df4b2SJaegeuk Kim { 171825ca923bSJaegeuk Kim if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) 1719351df4b2SJaegeuk Kim write_compacted_summaries(sbi, start_blk); 1720351df4b2SJaegeuk Kim else 1721351df4b2SJaegeuk Kim write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA); 1722351df4b2SJaegeuk Kim } 1723351df4b2SJaegeuk Kim 1724351df4b2SJaegeuk Kim void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk) 1725351df4b2SJaegeuk Kim { 1726351df4b2SJaegeuk Kim write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE); 1727351df4b2SJaegeuk Kim } 1728351df4b2SJaegeuk Kim 1729351df4b2SJaegeuk Kim int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type, 1730351df4b2SJaegeuk Kim unsigned int 
val, int alloc) 1731351df4b2SJaegeuk Kim { 1732351df4b2SJaegeuk Kim int i; 1733351df4b2SJaegeuk Kim 1734351df4b2SJaegeuk Kim if (type == NAT_JOURNAL) { 1735351df4b2SJaegeuk Kim for (i = 0; i < nats_in_cursum(sum); i++) { 1736351df4b2SJaegeuk Kim if (le32_to_cpu(nid_in_journal(sum, i)) == val) 1737351df4b2SJaegeuk Kim return i; 1738351df4b2SJaegeuk Kim } 1739855639deSChao Yu if (alloc && __has_cursum_space(sum, 1, NAT_JOURNAL)) 1740351df4b2SJaegeuk Kim return update_nats_in_cursum(sum, 1); 1741351df4b2SJaegeuk Kim } else if (type == SIT_JOURNAL) { 1742351df4b2SJaegeuk Kim for (i = 0; i < sits_in_cursum(sum); i++) 1743351df4b2SJaegeuk Kim if (le32_to_cpu(segno_in_journal(sum, i)) == val) 1744351df4b2SJaegeuk Kim return i; 1745855639deSChao Yu if (alloc && __has_cursum_space(sum, 1, SIT_JOURNAL)) 1746351df4b2SJaegeuk Kim return update_sits_in_cursum(sum, 1); 1747351df4b2SJaegeuk Kim } 1748351df4b2SJaegeuk Kim return -1; 1749351df4b2SJaegeuk Kim } 1750351df4b2SJaegeuk Kim 1751351df4b2SJaegeuk Kim static struct page *get_current_sit_page(struct f2fs_sb_info *sbi, 1752351df4b2SJaegeuk Kim unsigned int segno) 1753351df4b2SJaegeuk Kim { 17542cc22186SGu Zheng return get_meta_page(sbi, current_sit_addr(sbi, segno)); 1755351df4b2SJaegeuk Kim } 1756351df4b2SJaegeuk Kim 1757351df4b2SJaegeuk Kim static struct page *get_next_sit_page(struct f2fs_sb_info *sbi, 1758351df4b2SJaegeuk Kim unsigned int start) 1759351df4b2SJaegeuk Kim { 1760351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 1761351df4b2SJaegeuk Kim struct page *src_page, *dst_page; 1762351df4b2SJaegeuk Kim pgoff_t src_off, dst_off; 1763351df4b2SJaegeuk Kim void *src_addr, *dst_addr; 1764351df4b2SJaegeuk Kim 1765351df4b2SJaegeuk Kim src_off = current_sit_addr(sbi, start); 1766351df4b2SJaegeuk Kim dst_off = next_sit_addr(sbi, src_off); 1767351df4b2SJaegeuk Kim 1768351df4b2SJaegeuk Kim /* get current sit block page without lock */ 1769351df4b2SJaegeuk Kim src_page = get_meta_page(sbi, src_off); 1770351df4b2SJaegeuk 
Kim dst_page = grab_meta_page(sbi, dst_off); 17719850cf4aSJaegeuk Kim f2fs_bug_on(sbi, PageDirty(src_page)); 1772351df4b2SJaegeuk Kim 1773351df4b2SJaegeuk Kim src_addr = page_address(src_page); 1774351df4b2SJaegeuk Kim dst_addr = page_address(dst_page); 1775351df4b2SJaegeuk Kim memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE); 1776351df4b2SJaegeuk Kim 1777351df4b2SJaegeuk Kim set_page_dirty(dst_page); 1778351df4b2SJaegeuk Kim f2fs_put_page(src_page, 1); 1779351df4b2SJaegeuk Kim 1780351df4b2SJaegeuk Kim set_to_next_sit(sit_i, start); 1781351df4b2SJaegeuk Kim 1782351df4b2SJaegeuk Kim return dst_page; 1783351df4b2SJaegeuk Kim } 1784351df4b2SJaegeuk Kim 1785184a5cd2SChao Yu static struct sit_entry_set *grab_sit_entry_set(void) 1786184a5cd2SChao Yu { 1787184a5cd2SChao Yu struct sit_entry_set *ses = 178880c54505SJaegeuk Kim f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS); 1789184a5cd2SChao Yu 1790184a5cd2SChao Yu ses->entry_cnt = 0; 1791184a5cd2SChao Yu INIT_LIST_HEAD(&ses->set_list); 1792184a5cd2SChao Yu return ses; 1793184a5cd2SChao Yu } 1794184a5cd2SChao Yu 1795184a5cd2SChao Yu static void release_sit_entry_set(struct sit_entry_set *ses) 1796184a5cd2SChao Yu { 1797184a5cd2SChao Yu list_del(&ses->set_list); 1798184a5cd2SChao Yu kmem_cache_free(sit_entry_set_slab, ses); 1799184a5cd2SChao Yu } 1800184a5cd2SChao Yu 1801184a5cd2SChao Yu static void adjust_sit_entry_set(struct sit_entry_set *ses, 1802184a5cd2SChao Yu struct list_head *head) 1803184a5cd2SChao Yu { 1804184a5cd2SChao Yu struct sit_entry_set *next = ses; 1805184a5cd2SChao Yu 1806184a5cd2SChao Yu if (list_is_last(&ses->set_list, head)) 1807184a5cd2SChao Yu return; 1808184a5cd2SChao Yu 1809184a5cd2SChao Yu list_for_each_entry_continue(next, head, set_list) 1810184a5cd2SChao Yu if (ses->entry_cnt <= next->entry_cnt) 1811184a5cd2SChao Yu break; 1812184a5cd2SChao Yu 1813184a5cd2SChao Yu list_move_tail(&ses->set_list, &next->set_list); 1814184a5cd2SChao Yu } 1815184a5cd2SChao Yu 1816184a5cd2SChao Yu static void 
add_sit_entry(unsigned int segno, struct list_head *head) 1817184a5cd2SChao Yu { 1818184a5cd2SChao Yu struct sit_entry_set *ses; 1819184a5cd2SChao Yu unsigned int start_segno = START_SEGNO(segno); 1820184a5cd2SChao Yu 1821184a5cd2SChao Yu list_for_each_entry(ses, head, set_list) { 1822184a5cd2SChao Yu if (ses->start_segno == start_segno) { 1823184a5cd2SChao Yu ses->entry_cnt++; 1824184a5cd2SChao Yu adjust_sit_entry_set(ses, head); 1825184a5cd2SChao Yu return; 1826184a5cd2SChao Yu } 1827184a5cd2SChao Yu } 1828184a5cd2SChao Yu 1829184a5cd2SChao Yu ses = grab_sit_entry_set(); 1830184a5cd2SChao Yu 1831184a5cd2SChao Yu ses->start_segno = start_segno; 1832184a5cd2SChao Yu ses->entry_cnt++; 1833184a5cd2SChao Yu list_add(&ses->set_list, head); 1834184a5cd2SChao Yu } 1835184a5cd2SChao Yu 1836184a5cd2SChao Yu static void add_sits_in_set(struct f2fs_sb_info *sbi) 1837184a5cd2SChao Yu { 1838184a5cd2SChao Yu struct f2fs_sm_info *sm_info = SM_I(sbi); 1839184a5cd2SChao Yu struct list_head *set_list = &sm_info->sit_entry_set; 1840184a5cd2SChao Yu unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap; 1841184a5cd2SChao Yu unsigned int segno; 1842184a5cd2SChao Yu 18437cd8558bSJaegeuk Kim for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi)) 1844184a5cd2SChao Yu add_sit_entry(segno, set_list); 1845184a5cd2SChao Yu } 1846184a5cd2SChao Yu 1847184a5cd2SChao Yu static void remove_sits_in_journal(struct f2fs_sb_info *sbi) 1848351df4b2SJaegeuk Kim { 1849351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 1850351df4b2SJaegeuk Kim struct f2fs_summary_block *sum = curseg->sum_blk; 1851351df4b2SJaegeuk Kim int i; 1852351df4b2SJaegeuk Kim 1853351df4b2SJaegeuk Kim for (i = sits_in_cursum(sum) - 1; i >= 0; i--) { 1854351df4b2SJaegeuk Kim unsigned int segno; 1855184a5cd2SChao Yu bool dirtied; 1856184a5cd2SChao Yu 1857351df4b2SJaegeuk Kim segno = le32_to_cpu(segno_in_journal(sum, i)); 1858184a5cd2SChao Yu dirtied = __mark_sit_entry_dirty(sbi, segno); 1859184a5cd2SChao 
Yu 1860184a5cd2SChao Yu if (!dirtied) 1861184a5cd2SChao Yu add_sit_entry(segno, &SM_I(sbi)->sit_entry_set); 1862351df4b2SJaegeuk Kim } 1863351df4b2SJaegeuk Kim update_sits_in_cursum(sum, -sits_in_cursum(sum)); 1864351df4b2SJaegeuk Kim } 1865351df4b2SJaegeuk Kim 18660a8165d7SJaegeuk Kim /* 1867351df4b2SJaegeuk Kim * CP calls this function, which flushes SIT entries including sit_journal, 1868351df4b2SJaegeuk Kim * and moves prefree segs to free segs. 1869351df4b2SJaegeuk Kim */ 18704b2fecc8SJaegeuk Kim void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) 1871351df4b2SJaegeuk Kim { 1872351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 1873351df4b2SJaegeuk Kim unsigned long *bitmap = sit_i->dirty_sentries_bitmap; 1874351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 1875351df4b2SJaegeuk Kim struct f2fs_summary_block *sum = curseg->sum_blk; 1876184a5cd2SChao Yu struct sit_entry_set *ses, *tmp; 1877184a5cd2SChao Yu struct list_head *head = &SM_I(sbi)->sit_entry_set; 1878184a5cd2SChao Yu bool to_journal = true; 18794b2fecc8SJaegeuk Kim struct seg_entry *se; 1880351df4b2SJaegeuk Kim 1881351df4b2SJaegeuk Kim mutex_lock(&curseg->curseg_mutex); 1882351df4b2SJaegeuk Kim mutex_lock(&sit_i->sentry_lock); 1883351df4b2SJaegeuk Kim 18842b11a74bSWanpeng Li if (!sit_i->dirty_sentries) 18852b11a74bSWanpeng Li goto out; 18862b11a74bSWanpeng Li 1887351df4b2SJaegeuk Kim /* 1888184a5cd2SChao Yu * add and account sit entries of dirty bitmap in sit entry 1889184a5cd2SChao Yu * set temporarily 1890351df4b2SJaegeuk Kim */ 1891184a5cd2SChao Yu add_sits_in_set(sbi); 1892351df4b2SJaegeuk Kim 1893184a5cd2SChao Yu /* 1894184a5cd2SChao Yu * if there are no enough space in journal to store dirty sit 1895184a5cd2SChao Yu * entries, remove all entries from journal and add and account 1896184a5cd2SChao Yu * them in sit entry set. 
1897184a5cd2SChao Yu */ 1898184a5cd2SChao Yu if (!__has_cursum_space(sum, sit_i->dirty_sentries, SIT_JOURNAL)) 1899184a5cd2SChao Yu remove_sits_in_journal(sbi); 1900184a5cd2SChao Yu 1901184a5cd2SChao Yu /* 1902184a5cd2SChao Yu * there are two steps to flush sit entries: 1903184a5cd2SChao Yu * #1, flush sit entries to journal in current cold data summary block. 1904184a5cd2SChao Yu * #2, flush sit entries to sit page. 1905184a5cd2SChao Yu */ 1906184a5cd2SChao Yu list_for_each_entry_safe(ses, tmp, head, set_list) { 19074a257ed6SJaegeuk Kim struct page *page = NULL; 1908184a5cd2SChao Yu struct f2fs_sit_block *raw_sit = NULL; 1909184a5cd2SChao Yu unsigned int start_segno = ses->start_segno; 1910184a5cd2SChao Yu unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK, 19117cd8558bSJaegeuk Kim (unsigned long)MAIN_SEGS(sbi)); 1912184a5cd2SChao Yu unsigned int segno = start_segno; 1913184a5cd2SChao Yu 1914184a5cd2SChao Yu if (to_journal && 1915184a5cd2SChao Yu !__has_cursum_space(sum, ses->entry_cnt, SIT_JOURNAL)) 1916184a5cd2SChao Yu to_journal = false; 1917184a5cd2SChao Yu 1918184a5cd2SChao Yu if (!to_journal) { 1919184a5cd2SChao Yu page = get_next_sit_page(sbi, start_segno); 1920184a5cd2SChao Yu raw_sit = page_address(page); 1921184a5cd2SChao Yu } 1922184a5cd2SChao Yu 1923184a5cd2SChao Yu /* flush dirty sit entries in region of current sit set */ 1924184a5cd2SChao Yu for_each_set_bit_from(segno, bitmap, end) { 1925184a5cd2SChao Yu int offset, sit_offset; 19264b2fecc8SJaegeuk Kim 19274b2fecc8SJaegeuk Kim se = get_seg_entry(sbi, segno); 1928351df4b2SJaegeuk Kim 1929b2955550SJaegeuk Kim /* add discard candidates */ 1930d7bc2484SJaegeuk Kim if (cpc->reason != CP_DISCARD) { 19314b2fecc8SJaegeuk Kim cpc->trim_start = segno; 19324b2fecc8SJaegeuk Kim add_discard_addrs(sbi, cpc); 19334b2fecc8SJaegeuk Kim } 1934b2955550SJaegeuk Kim 1935184a5cd2SChao Yu if (to_journal) { 1936184a5cd2SChao Yu offset = lookup_journal_in_cursum(sum, 1937184a5cd2SChao Yu SIT_JOURNAL, segno, 1); 
1938184a5cd2SChao Yu f2fs_bug_on(sbi, offset < 0); 1939184a5cd2SChao Yu segno_in_journal(sum, offset) = 1940184a5cd2SChao Yu cpu_to_le32(segno); 1941184a5cd2SChao Yu seg_info_to_raw_sit(se, 1942184a5cd2SChao Yu &sit_in_journal(sum, offset)); 1943184a5cd2SChao Yu } else { 1944184a5cd2SChao Yu sit_offset = SIT_ENTRY_OFFSET(sit_i, segno); 1945184a5cd2SChao Yu seg_info_to_raw_sit(se, 1946184a5cd2SChao Yu &raw_sit->entries[sit_offset]); 1947351df4b2SJaegeuk Kim } 1948351df4b2SJaegeuk Kim 1949351df4b2SJaegeuk Kim __clear_bit(segno, bitmap); 1950351df4b2SJaegeuk Kim sit_i->dirty_sentries--; 1951184a5cd2SChao Yu ses->entry_cnt--; 1952351df4b2SJaegeuk Kim } 1953184a5cd2SChao Yu 1954184a5cd2SChao Yu if (!to_journal) 1955184a5cd2SChao Yu f2fs_put_page(page, 1); 1956184a5cd2SChao Yu 1957184a5cd2SChao Yu f2fs_bug_on(sbi, ses->entry_cnt); 1958184a5cd2SChao Yu release_sit_entry_set(ses); 1959184a5cd2SChao Yu } 1960184a5cd2SChao Yu 1961184a5cd2SChao Yu f2fs_bug_on(sbi, !list_empty(head)); 1962184a5cd2SChao Yu f2fs_bug_on(sbi, sit_i->dirty_sentries); 1963184a5cd2SChao Yu out: 19644b2fecc8SJaegeuk Kim if (cpc->reason == CP_DISCARD) { 19654b2fecc8SJaegeuk Kim for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) 19664b2fecc8SJaegeuk Kim add_discard_addrs(sbi, cpc); 19674b2fecc8SJaegeuk Kim } 1968351df4b2SJaegeuk Kim mutex_unlock(&sit_i->sentry_lock); 1969351df4b2SJaegeuk Kim mutex_unlock(&curseg->curseg_mutex); 1970351df4b2SJaegeuk Kim 1971351df4b2SJaegeuk Kim set_prefree_as_free_segments(sbi); 1972351df4b2SJaegeuk Kim } 1973351df4b2SJaegeuk Kim 1974351df4b2SJaegeuk Kim static int build_sit_info(struct f2fs_sb_info *sbi) 1975351df4b2SJaegeuk Kim { 1976351df4b2SJaegeuk Kim struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 1977351df4b2SJaegeuk Kim struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 1978351df4b2SJaegeuk Kim struct sit_info *sit_i; 1979351df4b2SJaegeuk Kim unsigned int sit_segs, start; 1980351df4b2SJaegeuk Kim char *src_bitmap, *dst_bitmap; 1981351df4b2SJaegeuk 
Kim unsigned int bitmap_size; 1982351df4b2SJaegeuk Kim 1983351df4b2SJaegeuk Kim /* allocate memory for SIT information */ 1984351df4b2SJaegeuk Kim sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL); 1985351df4b2SJaegeuk Kim if (!sit_i) 1986351df4b2SJaegeuk Kim return -ENOMEM; 1987351df4b2SJaegeuk Kim 1988351df4b2SJaegeuk Kim SM_I(sbi)->sit_info = sit_i; 1989351df4b2SJaegeuk Kim 199039307a8eSJaegeuk Kim sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) * 199139307a8eSJaegeuk Kim sizeof(struct seg_entry), GFP_KERNEL); 1992351df4b2SJaegeuk Kim if (!sit_i->sentries) 1993351df4b2SJaegeuk Kim return -ENOMEM; 1994351df4b2SJaegeuk Kim 19957cd8558bSJaegeuk Kim bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); 199639307a8eSJaegeuk Kim sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL); 1997351df4b2SJaegeuk Kim if (!sit_i->dirty_sentries_bitmap) 1998351df4b2SJaegeuk Kim return -ENOMEM; 1999351df4b2SJaegeuk Kim 20007cd8558bSJaegeuk Kim for (start = 0; start < MAIN_SEGS(sbi); start++) { 2001351df4b2SJaegeuk Kim sit_i->sentries[start].cur_valid_map 2002351df4b2SJaegeuk Kim = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); 2003351df4b2SJaegeuk Kim sit_i->sentries[start].ckpt_valid_map 2004351df4b2SJaegeuk Kim = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); 2005a66cdd98SJaegeuk Kim sit_i->sentries[start].discard_map 2006a66cdd98SJaegeuk Kim = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); 2007a66cdd98SJaegeuk Kim if (!sit_i->sentries[start].cur_valid_map || 2008a66cdd98SJaegeuk Kim !sit_i->sentries[start].ckpt_valid_map || 2009a66cdd98SJaegeuk Kim !sit_i->sentries[start].discard_map) 2010351df4b2SJaegeuk Kim return -ENOMEM; 2011351df4b2SJaegeuk Kim } 2012351df4b2SJaegeuk Kim 201360a3b782SJaegeuk Kim sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); 201460a3b782SJaegeuk Kim if (!sit_i->tmp_map) 201560a3b782SJaegeuk Kim return -ENOMEM; 201660a3b782SJaegeuk Kim 2017351df4b2SJaegeuk Kim if (sbi->segs_per_sec > 1) { 201839307a8eSJaegeuk Kim sit_i->sec_entries = 
			f2fs_kvzalloc(MAIN_SECS(sbi) *
					sizeof(struct sec_entry), GFP_KERNEL);
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related with SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from checkpoint pack */
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	/*
	 * NOTE(review): the checkpoint's SIT bitmap is duplicated here,
	 * presumably so the live copy can be flipped independently of the
	 * checkpoint pack — confirm against the checkpoint write path.
	 */
	dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!dst_bitmap)
		return -ENOMEM;

	/* init SIT information */
	sit_i->s_ops = &default_salloc_ops;

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	/* each SIT segment is mirrored (>> 1 above), hence segs * blks/seg */
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
	sit_i->sit_bitmap = dst_bitmap;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
	mutex_init(&sit_i->sentry_lock);
	return 0;
}

/*
 * Allocate and initialize the free segment/section bitmaps.
 *
 * Both bitmaps start as all-ones ("everything in use"); init_free_segmap()
 * later clears the bits of truly free segments based on the SIT.
 * On -ENOMEM the partially built state is left attached to SM_I(sbi);
 * NOTE(review): presumably the mount failure path tears it down via
 * destroy_segment_manager() — confirm against the caller.
 */
static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
	free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	spin_lock_init(&free_i->segmap_lock);
	return 0;
}

/*
 * Allocate the per-log current-segment (curseg) array with one summary
 * block each, then have restore_curseg_summaries() repopulate them from
 * the last checkpoint.
 */
static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		/* one page holds the on-disk summary block for this log */
		array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
	}
	return restore_curseg_summaries(sbi);
}

/*
 * Populate the in-memory seg_entry array from the on-disk SIT, preferring
 * a journaled copy in the cold-data summary block when one exists for the
 * segment.  SIT pages are read ahead in MAX_BIO_BLOCKS-sized batches.
 */
static void build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int sit_blk_cnt = SIT_BLK_CNT(sbi);
	unsigned int i, start, end;
	unsigned int readed, start_blk = 0;
	int nrpages = MAX_BIO_BLOCKS(sbi);

	do {
		readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT, true);

		start = start_blk * sit_i->sents_per_block;
		end = (start_blk + readed) * sit_i->sents_per_block;

		for (; start < end && start < MAIN_SEGS(sbi); start++) {
			struct seg_entry *se = &sit_i->sentries[start];
			struct f2fs_sit_block *sit_blk;
			struct f2fs_sit_entry sit;
			struct page *page;

			/* a journaled SIT entry supersedes the on-disk one */
			mutex_lock(&curseg->curseg_mutex);
			for (i = 0; i < sits_in_cursum(sum); i++) {
				if
(le32_to_cpu(segno_in_journal(sum, i))
							== start) {
					sit = sit_in_journal(sum, i);
					mutex_unlock(&curseg->curseg_mutex);
					goto got_it;
				}
			}
			mutex_unlock(&curseg->curseg_mutex);

			/* no journal hit: read the raw entry from its SIT page */
			page = get_current_sit_page(sbi, start);
			sit_blk = (struct f2fs_sit_block *)page_address(page);
			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
			f2fs_put_page(page, 1);
got_it:
			check_block_count(sbi, start, &sit);
			seg_info_from_raw_sit(se, &sit);

			/* build discard map only one time */
			memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
			sbi->discard_blks += sbi->blocks_per_seg - se->valid_blocks;

			/* multi-segment sections also track a per-section total */
			if (sbi->segs_per_sec > 1) {
				struct sec_entry *e = get_sec_entry(sbi, start);
				e->valid_blocks += se->valid_blocks;
			}
		}
		start_blk += readed;
	} while (start_blk < sit_blk_cnt);
}

/*
 * Clear the free bitmap bits of segments with no valid blocks, then
 * re-mark the currently open (curseg) segments as in use.
 */
static void init_free_segmap(struct f2fs_sb_info *sbi)
{
	unsigned int start;
	int type;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		struct seg_entry *sentry = get_seg_entry(sbi, start);
		if (!sentry->valid_blocks)
			__set_free(sbi, start);
	}

	/* set use the current segments */
	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
		__set_test_and_inuse(sbi, curseg_t->segno);
	}
}

/*
 * Walk every in-use segment and mark the partially valid ones (neither
 * full nor empty) DIRTY so the GC/checkpoint machinery can find them.
 */
static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno = 0, offset = 0;
	unsigned short valid_blocks;

	while (1) {
		/* find dirty segment based on free segmap */
		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
		if (segno >= MAIN_SEGS(sbi))
			break;
		offset = segno + 1;
		valid_blocks = get_valid_blocks(sbi, segno, 0);
		/* fully valid or fully empty segments are not "dirty" */
		if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
			continue;
		/* count beyond segment capacity indicates corrupt metadata */
		if (valid_blocks > sbi->blocks_per_seg) {
			f2fs_bug_on(sbi, 1);
			continue;
		}
		mutex_lock(&dirty_i->seglist_lock);
		__locate_dirty_segment(sbi, segno, DIRTY);
		mutex_unlock(&dirty_i->seglist_lock);
	}
}

/* Allocate the bitmap of sections currently selected as GC victims. */
static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
	if (!dirty_i->victim_secmap)
		return -ENOMEM;
	return 0;
}

/*
 * Allocate the dirty-segment bookkeeping (one bitmap per dirty type plus
 * the victim section map) and seed it from the freshly built SIT state.
 */
static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i;
	unsigned int bitmap_size, i;

	/* allocate memory for dirty segments list information */
	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
	if (!dirty_i)
		return -ENOMEM;

	SM_I(sbi)->dirty_info = dirty_i;
	mutex_init(&dirty_i->seglist_lock);

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));

	for (i = 0; i < NR_DIRTY_TYPE; i++) {
		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
		if (!dirty_i->dirty_segmap[i])
			return -ENOMEM;
	}

	init_dirty_segmap(sbi);
	return init_victim_secmap(sbi);
}

/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	mutex_lock(&sit_i->sentry_lock);
	sit_i->min_mtime = LLONG_MAX;

	/* min_mtime is the smallest per-section average segment mtime */
	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < sbi->segs_per_sec; i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, sbi->segs_per_sec);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi);
	mutex_unlock(&sit_i->sentry_lock);
}

/*
 * Build the whole segment manager at mount time: sm_info tunables, SIT,
 * free/dirty segment maps, current segments, and GC mtime bounds.
 *
 * Returns 0 or a negative errno.  On failure, partially built structures
 * stay attached to sbi->sm_info; NOTE(review): presumably the mount error
 * path calls destroy_segment_manager() to free them — confirm in caller.
 */
int build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = sm_info->main_segments *
					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
	sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;

	INIT_LIST_HEAD(&sm_info->discard_list);
	sm_info->nr_discards = 0;
	sm_info->max_discards = 0;

	sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;

	INIT_LIST_HEAD(&sm_info->sit_entry_set);

	/* flush merging is pointless (and unstarted) on read-only mounts */
	if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
		err = create_flush_cmd_control(sbi);
		if (err)
			return err;
	}

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	build_sit_entries(sbi);

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}

/* Free one dirty-type bitmap and reset its dirty-segment counter. */
static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
					enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kvfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	kvfree(dirty_i->victim_secmap);
}

/* Tear down everything build_dirty_segmap() allocated; NULL-safe. */
static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}

/* Free the curseg array and its summary blocks; NULL-safe. */
static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
Kim SM_I(sbi)->curseg_array = NULL; 2373351df4b2SJaegeuk Kim for (i = 0; i < NR_CURSEG_TYPE; i++) 2374351df4b2SJaegeuk Kim kfree(array[i].sum_blk); 2375351df4b2SJaegeuk Kim kfree(array); 2376351df4b2SJaegeuk Kim } 2377351df4b2SJaegeuk Kim 2378351df4b2SJaegeuk Kim static void destroy_free_segmap(struct f2fs_sb_info *sbi) 2379351df4b2SJaegeuk Kim { 2380351df4b2SJaegeuk Kim struct free_segmap_info *free_i = SM_I(sbi)->free_info; 2381351df4b2SJaegeuk Kim if (!free_i) 2382351df4b2SJaegeuk Kim return; 2383351df4b2SJaegeuk Kim SM_I(sbi)->free_info = NULL; 238439307a8eSJaegeuk Kim kvfree(free_i->free_segmap); 238539307a8eSJaegeuk Kim kvfree(free_i->free_secmap); 2386351df4b2SJaegeuk Kim kfree(free_i); 2387351df4b2SJaegeuk Kim } 2388351df4b2SJaegeuk Kim 2389351df4b2SJaegeuk Kim static void destroy_sit_info(struct f2fs_sb_info *sbi) 2390351df4b2SJaegeuk Kim { 2391351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 2392351df4b2SJaegeuk Kim unsigned int start; 2393351df4b2SJaegeuk Kim 2394351df4b2SJaegeuk Kim if (!sit_i) 2395351df4b2SJaegeuk Kim return; 2396351df4b2SJaegeuk Kim 2397351df4b2SJaegeuk Kim if (sit_i->sentries) { 23987cd8558bSJaegeuk Kim for (start = 0; start < MAIN_SEGS(sbi); start++) { 2399351df4b2SJaegeuk Kim kfree(sit_i->sentries[start].cur_valid_map); 2400351df4b2SJaegeuk Kim kfree(sit_i->sentries[start].ckpt_valid_map); 2401a66cdd98SJaegeuk Kim kfree(sit_i->sentries[start].discard_map); 2402351df4b2SJaegeuk Kim } 2403351df4b2SJaegeuk Kim } 240460a3b782SJaegeuk Kim kfree(sit_i->tmp_map); 240560a3b782SJaegeuk Kim 240639307a8eSJaegeuk Kim kvfree(sit_i->sentries); 240739307a8eSJaegeuk Kim kvfree(sit_i->sec_entries); 240839307a8eSJaegeuk Kim kvfree(sit_i->dirty_sentries_bitmap); 2409351df4b2SJaegeuk Kim 2410351df4b2SJaegeuk Kim SM_I(sbi)->sit_info = NULL; 2411351df4b2SJaegeuk Kim kfree(sit_i->sit_bitmap); 2412351df4b2SJaegeuk Kim kfree(sit_i); 2413351df4b2SJaegeuk Kim } 2414351df4b2SJaegeuk Kim 2415351df4b2SJaegeuk Kim void destroy_segment_manager(struct 
f2fs_sb_info *sbi) 2416351df4b2SJaegeuk Kim { 2417351df4b2SJaegeuk Kim struct f2fs_sm_info *sm_info = SM_I(sbi); 2418a688b9d9SGu Zheng 24193b03f724SChao Yu if (!sm_info) 24203b03f724SChao Yu return; 24212163d198SGu Zheng destroy_flush_cmd_control(sbi); 2422351df4b2SJaegeuk Kim destroy_dirty_segmap(sbi); 2423351df4b2SJaegeuk Kim destroy_curseg(sbi); 2424351df4b2SJaegeuk Kim destroy_free_segmap(sbi); 2425351df4b2SJaegeuk Kim destroy_sit_info(sbi); 2426351df4b2SJaegeuk Kim sbi->sm_info = NULL; 2427351df4b2SJaegeuk Kim kfree(sm_info); 2428351df4b2SJaegeuk Kim } 24297fd9e544SJaegeuk Kim 24307fd9e544SJaegeuk Kim int __init create_segment_manager_caches(void) 24317fd9e544SJaegeuk Kim { 24327fd9e544SJaegeuk Kim discard_entry_slab = f2fs_kmem_cache_create("discard_entry", 2433e8512d2eSGu Zheng sizeof(struct discard_entry)); 24347fd9e544SJaegeuk Kim if (!discard_entry_slab) 2435184a5cd2SChao Yu goto fail; 2436184a5cd2SChao Yu 2437184a5cd2SChao Yu sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set", 2438c9ee0085SChangman Lee sizeof(struct sit_entry_set)); 2439184a5cd2SChao Yu if (!sit_entry_set_slab) 2440184a5cd2SChao Yu goto destory_discard_entry; 244188b88a66SJaegeuk Kim 244288b88a66SJaegeuk Kim inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry", 244388b88a66SJaegeuk Kim sizeof(struct inmem_pages)); 244488b88a66SJaegeuk Kim if (!inmem_entry_slab) 244588b88a66SJaegeuk Kim goto destroy_sit_entry_set; 24467fd9e544SJaegeuk Kim return 0; 2447184a5cd2SChao Yu 244888b88a66SJaegeuk Kim destroy_sit_entry_set: 244988b88a66SJaegeuk Kim kmem_cache_destroy(sit_entry_set_slab); 2450184a5cd2SChao Yu destory_discard_entry: 2451184a5cd2SChao Yu kmem_cache_destroy(discard_entry_slab); 2452184a5cd2SChao Yu fail: 2453184a5cd2SChao Yu return -ENOMEM; 24547fd9e544SJaegeuk Kim } 24557fd9e544SJaegeuk Kim 24567fd9e544SJaegeuk Kim void destroy_segment_manager_caches(void) 24577fd9e544SJaegeuk Kim { 2458184a5cd2SChao Yu kmem_cache_destroy(sit_entry_set_slab); 24597fd9e544SJaegeuk 
Kim kmem_cache_destroy(discard_entry_slab); 246088b88a66SJaegeuk Kim kmem_cache_destroy(inmem_entry_slab); 24617fd9e544SJaegeuk Kim } 2462