10a8165d7SJaegeuk Kim /* 2351df4b2SJaegeuk Kim * fs/f2fs/segment.c 3351df4b2SJaegeuk Kim * 4351df4b2SJaegeuk Kim * Copyright (c) 2012 Samsung Electronics Co., Ltd. 5351df4b2SJaegeuk Kim * http://www.samsung.com/ 6351df4b2SJaegeuk Kim * 7351df4b2SJaegeuk Kim * This program is free software; you can redistribute it and/or modify 8351df4b2SJaegeuk Kim * it under the terms of the GNU General Public License version 2 as 9351df4b2SJaegeuk Kim * published by the Free Software Foundation. 10351df4b2SJaegeuk Kim */ 11351df4b2SJaegeuk Kim #include <linux/fs.h> 12351df4b2SJaegeuk Kim #include <linux/f2fs_fs.h> 13351df4b2SJaegeuk Kim #include <linux/bio.h> 14351df4b2SJaegeuk Kim #include <linux/blkdev.h> 15690e4a3eSGeert Uytterhoeven #include <linux/prefetch.h> 166b4afdd7SJaegeuk Kim #include <linux/kthread.h> 1774de593aSChao Yu #include <linux/swap.h> 1860b99b48SJaegeuk Kim #include <linux/timer.h> 19351df4b2SJaegeuk Kim 20351df4b2SJaegeuk Kim #include "f2fs.h" 21351df4b2SJaegeuk Kim #include "segment.h" 22351df4b2SJaegeuk Kim #include "node.h" 239e4ded3fSJaegeuk Kim #include "trace.h" 246ec178daSNamjae Jeon #include <trace/events/f2fs.h> 25351df4b2SJaegeuk Kim 269a7f143aSChangman Lee #define __reverse_ffz(x) __reverse_ffs(~(x)) 279a7f143aSChangman Lee 287fd9e544SJaegeuk Kim static struct kmem_cache *discard_entry_slab; 29184a5cd2SChao Yu static struct kmem_cache *sit_entry_set_slab; 3088b88a66SJaegeuk Kim static struct kmem_cache *inmem_entry_slab; 317fd9e544SJaegeuk Kim 32f96999c3SJaegeuk Kim static unsigned long __reverse_ulong(unsigned char *str) 33f96999c3SJaegeuk Kim { 34f96999c3SJaegeuk Kim unsigned long tmp = 0; 35f96999c3SJaegeuk Kim int shift = 24, idx = 0; 36f96999c3SJaegeuk Kim 37f96999c3SJaegeuk Kim #if BITS_PER_LONG == 64 38f96999c3SJaegeuk Kim shift = 56; 39f96999c3SJaegeuk Kim #endif 40f96999c3SJaegeuk Kim while (shift >= 0) { 41f96999c3SJaegeuk Kim tmp |= (unsigned long)str[idx++] << shift; 42f96999c3SJaegeuk Kim shift -= BITS_PER_BYTE; 43f96999c3SJaegeuk Kim 
} 44f96999c3SJaegeuk Kim return tmp; 45f96999c3SJaegeuk Kim } 46f96999c3SJaegeuk Kim 479a7f143aSChangman Lee /* 489a7f143aSChangman Lee * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since 499a7f143aSChangman Lee * MSB and LSB are reversed in a byte by f2fs_set_bit. 509a7f143aSChangman Lee */ 519a7f143aSChangman Lee static inline unsigned long __reverse_ffs(unsigned long word) 529a7f143aSChangman Lee { 539a7f143aSChangman Lee int num = 0; 549a7f143aSChangman Lee 559a7f143aSChangman Lee #if BITS_PER_LONG == 64 56f96999c3SJaegeuk Kim if ((word & 0xffffffff00000000UL) == 0) 579a7f143aSChangman Lee num += 32; 58f96999c3SJaegeuk Kim else 599a7f143aSChangman Lee word >>= 32; 609a7f143aSChangman Lee #endif 61f96999c3SJaegeuk Kim if ((word & 0xffff0000) == 0) 629a7f143aSChangman Lee num += 16; 63f96999c3SJaegeuk Kim else 649a7f143aSChangman Lee word >>= 16; 65f96999c3SJaegeuk Kim 66f96999c3SJaegeuk Kim if ((word & 0xff00) == 0) 679a7f143aSChangman Lee num += 8; 68f96999c3SJaegeuk Kim else 699a7f143aSChangman Lee word >>= 8; 70f96999c3SJaegeuk Kim 719a7f143aSChangman Lee if ((word & 0xf0) == 0) 729a7f143aSChangman Lee num += 4; 739a7f143aSChangman Lee else 749a7f143aSChangman Lee word >>= 4; 75f96999c3SJaegeuk Kim 769a7f143aSChangman Lee if ((word & 0xc) == 0) 779a7f143aSChangman Lee num += 2; 789a7f143aSChangman Lee else 799a7f143aSChangman Lee word >>= 2; 80f96999c3SJaegeuk Kim 819a7f143aSChangman Lee if ((word & 0x2) == 0) 829a7f143aSChangman Lee num += 1; 839a7f143aSChangman Lee return num; 849a7f143aSChangman Lee } 859a7f143aSChangman Lee 869a7f143aSChangman Lee /* 87e1c42045Sarter97 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because 889a7f143aSChangman Lee * f2fs_set_bit makes MSB and LSB reversed in a byte. 89692223d1SFan Li * @size must be integral times of unsigned long. 
909a7f143aSChangman Lee * Example: 91f96999c3SJaegeuk Kim * MSB <--> LSB 92f96999c3SJaegeuk Kim * f2fs_set_bit(0, bitmap) => 1000 0000 93f96999c3SJaegeuk Kim * f2fs_set_bit(7, bitmap) => 0000 0001 949a7f143aSChangman Lee */ 959a7f143aSChangman Lee static unsigned long __find_rev_next_bit(const unsigned long *addr, 969a7f143aSChangman Lee unsigned long size, unsigned long offset) 979a7f143aSChangman Lee { 989a7f143aSChangman Lee const unsigned long *p = addr + BIT_WORD(offset); 99692223d1SFan Li unsigned long result = size; 1009a7f143aSChangman Lee unsigned long tmp; 1019a7f143aSChangman Lee 1029a7f143aSChangman Lee if (offset >= size) 1039a7f143aSChangman Lee return size; 1049a7f143aSChangman Lee 105692223d1SFan Li size -= (offset & ~(BITS_PER_LONG - 1)); 1069a7f143aSChangman Lee offset %= BITS_PER_LONG; 107692223d1SFan Li 108692223d1SFan Li while (1) { 109692223d1SFan Li if (*p == 0) 110692223d1SFan Li goto pass; 1119a7f143aSChangman Lee 112f96999c3SJaegeuk Kim tmp = __reverse_ulong((unsigned char *)p); 113692223d1SFan Li 114f96999c3SJaegeuk Kim tmp &= ~0UL >> offset; 1159a7f143aSChangman Lee if (size < BITS_PER_LONG) 116692223d1SFan Li tmp &= (~0UL << (BITS_PER_LONG - size)); 1179a7f143aSChangman Lee if (tmp) 118692223d1SFan Li goto found; 119692223d1SFan Li pass: 120692223d1SFan Li if (size <= BITS_PER_LONG) 121692223d1SFan Li break; 1229a7f143aSChangman Lee size -= BITS_PER_LONG; 123692223d1SFan Li offset = 0; 124f96999c3SJaegeuk Kim p++; 1259a7f143aSChangman Lee } 1269a7f143aSChangman Lee return result; 127692223d1SFan Li found: 128692223d1SFan Li return result - size + __reverse_ffs(tmp); 1299a7f143aSChangman Lee } 1309a7f143aSChangman Lee 1319a7f143aSChangman Lee static unsigned long __find_rev_next_zero_bit(const unsigned long *addr, 1329a7f143aSChangman Lee unsigned long size, unsigned long offset) 1339a7f143aSChangman Lee { 1349a7f143aSChangman Lee const unsigned long *p = addr + BIT_WORD(offset); 13580609448SJaegeuk Kim unsigned long result = size; 
1369a7f143aSChangman Lee unsigned long tmp; 1379a7f143aSChangman Lee 1389a7f143aSChangman Lee if (offset >= size) 1399a7f143aSChangman Lee return size; 1409a7f143aSChangman Lee 14180609448SJaegeuk Kim size -= (offset & ~(BITS_PER_LONG - 1)); 1429a7f143aSChangman Lee offset %= BITS_PER_LONG; 14380609448SJaegeuk Kim 14480609448SJaegeuk Kim while (1) { 14580609448SJaegeuk Kim if (*p == ~0UL) 14680609448SJaegeuk Kim goto pass; 1479a7f143aSChangman Lee 148f96999c3SJaegeuk Kim tmp = __reverse_ulong((unsigned char *)p); 149f96999c3SJaegeuk Kim 15080609448SJaegeuk Kim if (offset) 15180609448SJaegeuk Kim tmp |= ~0UL << (BITS_PER_LONG - offset); 1529a7f143aSChangman Lee if (size < BITS_PER_LONG) 15380609448SJaegeuk Kim tmp |= ~0UL >> size; 154f96999c3SJaegeuk Kim if (tmp != ~0UL) 15580609448SJaegeuk Kim goto found; 15680609448SJaegeuk Kim pass: 15780609448SJaegeuk Kim if (size <= BITS_PER_LONG) 15880609448SJaegeuk Kim break; 1599a7f143aSChangman Lee size -= BITS_PER_LONG; 16080609448SJaegeuk Kim offset = 0; 161f96999c3SJaegeuk Kim p++; 1629a7f143aSChangman Lee } 1639a7f143aSChangman Lee return result; 16480609448SJaegeuk Kim found: 16580609448SJaegeuk Kim return result - size + __reverse_ffz(tmp); 1669a7f143aSChangman Lee } 1679a7f143aSChangman Lee 16888b88a66SJaegeuk Kim void register_inmem_page(struct inode *inode, struct page *page) 16988b88a66SJaegeuk Kim { 17088b88a66SJaegeuk Kim struct f2fs_inode_info *fi = F2FS_I(inode); 17188b88a66SJaegeuk Kim struct inmem_pages *new; 1729be32d72SJaegeuk Kim 1739e4ded3fSJaegeuk Kim f2fs_trace_pid(page); 1740722b101SJaegeuk Kim 175decd36b6SChao Yu set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE); 176decd36b6SChao Yu SetPagePrivate(page); 177decd36b6SChao Yu 17888b88a66SJaegeuk Kim new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS); 17988b88a66SJaegeuk Kim 18088b88a66SJaegeuk Kim /* add atomic page indices to the list */ 18188b88a66SJaegeuk Kim new->page = page; 18288b88a66SJaegeuk Kim INIT_LIST_HEAD(&new->list); 
183decd36b6SChao Yu 18488b88a66SJaegeuk Kim /* increase reference count with clean state */ 18588b88a66SJaegeuk Kim mutex_lock(&fi->inmem_lock); 18688b88a66SJaegeuk Kim get_page(page); 18788b88a66SJaegeuk Kim list_add_tail(&new->list, &fi->inmem_pages); 1888dcf2ff7SJaegeuk Kim inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES); 18988b88a66SJaegeuk Kim mutex_unlock(&fi->inmem_lock); 1908ce67cb0SJaegeuk Kim 1918ce67cb0SJaegeuk Kim trace_f2fs_register_inmem_page(page, INMEM); 19288b88a66SJaegeuk Kim } 19388b88a66SJaegeuk Kim 19428bc106bSChao Yu static int __revoke_inmem_pages(struct inode *inode, 19528bc106bSChao Yu struct list_head *head, bool drop, bool recover) 19629b96b54SChao Yu { 19728bc106bSChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 19829b96b54SChao Yu struct inmem_pages *cur, *tmp; 19928bc106bSChao Yu int err = 0; 20029b96b54SChao Yu 20129b96b54SChao Yu list_for_each_entry_safe(cur, tmp, head, list) { 20228bc106bSChao Yu struct page *page = cur->page; 20329b96b54SChao Yu 20428bc106bSChao Yu if (drop) 20528bc106bSChao Yu trace_f2fs_commit_inmem_page(page, INMEM_DROP); 20628bc106bSChao Yu 20728bc106bSChao Yu lock_page(page); 20828bc106bSChao Yu 20928bc106bSChao Yu if (recover) { 21028bc106bSChao Yu struct dnode_of_data dn; 21128bc106bSChao Yu struct node_info ni; 21228bc106bSChao Yu 21328bc106bSChao Yu trace_f2fs_commit_inmem_page(page, INMEM_REVOKE); 21428bc106bSChao Yu 21528bc106bSChao Yu set_new_dnode(&dn, inode, NULL, NULL, 0); 21628bc106bSChao Yu if (get_dnode_of_data(&dn, page->index, LOOKUP_NODE)) { 21728bc106bSChao Yu err = -EAGAIN; 21828bc106bSChao Yu goto next; 21928bc106bSChao Yu } 22028bc106bSChao Yu get_node_info(sbi, dn.nid, &ni); 22128bc106bSChao Yu f2fs_replace_block(sbi, &dn, dn.data_blkaddr, 22228bc106bSChao Yu cur->old_addr, ni.version, true, true); 22328bc106bSChao Yu f2fs_put_dnode(&dn); 22428bc106bSChao Yu } 22528bc106bSChao Yu next: 22663c52d78SJaegeuk Kim /* we don't need to invalidate this in the sccessful status */ 
22763c52d78SJaegeuk Kim if (drop || recover) 22828bc106bSChao Yu ClearPageUptodate(page); 22928bc106bSChao Yu set_page_private(page, 0); 230c81ced05SChao Yu ClearPagePrivate(page); 23128bc106bSChao Yu f2fs_put_page(page, 1); 23229b96b54SChao Yu 23329b96b54SChao Yu list_del(&cur->list); 23429b96b54SChao Yu kmem_cache_free(inmem_entry_slab, cur); 23529b96b54SChao Yu dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES); 23629b96b54SChao Yu } 23728bc106bSChao Yu return err; 23829b96b54SChao Yu } 23929b96b54SChao Yu 24029b96b54SChao Yu void drop_inmem_pages(struct inode *inode) 24129b96b54SChao Yu { 24229b96b54SChao Yu struct f2fs_inode_info *fi = F2FS_I(inode); 24329b96b54SChao Yu 24426dc3d44SJaegeuk Kim clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); 24526dc3d44SJaegeuk Kim 24629b96b54SChao Yu mutex_lock(&fi->inmem_lock); 24728bc106bSChao Yu __revoke_inmem_pages(inode, &fi->inmem_pages, true, false); 24829b96b54SChao Yu mutex_unlock(&fi->inmem_lock); 24929b96b54SChao Yu } 25029b96b54SChao Yu 25128bc106bSChao Yu static int __commit_inmem_pages(struct inode *inode, 25228bc106bSChao Yu struct list_head *revoke_list) 25388b88a66SJaegeuk Kim { 25488b88a66SJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 25588b88a66SJaegeuk Kim struct f2fs_inode_info *fi = F2FS_I(inode); 25688b88a66SJaegeuk Kim struct inmem_pages *cur, *tmp; 25788b88a66SJaegeuk Kim struct f2fs_io_info fio = { 25805ca3632SJaegeuk Kim .sbi = sbi, 25988b88a66SJaegeuk Kim .type = DATA, 260*04d328deSMike Christie .op = REQ_OP_WRITE, 261*04d328deSMike Christie .op_flags = WRITE_SYNC | REQ_PRIO, 2624375a336SJaegeuk Kim .encrypted_page = NULL, 26388b88a66SJaegeuk Kim }; 26429b96b54SChao Yu bool submit_bio = false; 265edb27deeSJaegeuk Kim int err = 0; 26688b88a66SJaegeuk Kim 26788b88a66SJaegeuk Kim list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) { 26828bc106bSChao Yu struct page *page = cur->page; 26928bc106bSChao Yu 27028bc106bSChao Yu lock_page(page); 27128bc106bSChao Yu if (page->mapping == 
inode->i_mapping) { 27228bc106bSChao Yu trace_f2fs_commit_inmem_page(page, INMEM); 27328bc106bSChao Yu 27428bc106bSChao Yu set_page_dirty(page); 27528bc106bSChao Yu f2fs_wait_on_page_writeback(page, DATA, true); 27628bc106bSChao Yu if (clear_page_dirty_for_io(page)) 27788b88a66SJaegeuk Kim inode_dec_dirty_pages(inode); 27828bc106bSChao Yu 27928bc106bSChao Yu fio.page = page; 280edb27deeSJaegeuk Kim err = do_write_data_page(&fio); 281edb27deeSJaegeuk Kim if (err) { 28228bc106bSChao Yu unlock_page(page); 283edb27deeSJaegeuk Kim break; 284edb27deeSJaegeuk Kim } 28528bc106bSChao Yu 28628bc106bSChao Yu /* record old blkaddr for revoking */ 28728bc106bSChao Yu cur->old_addr = fio.old_blkaddr; 28828bc106bSChao Yu 28928bc106bSChao Yu clear_cold_data(page); 2902b246fb0SJaegeuk Kim submit_bio = true; 29188b88a66SJaegeuk Kim } 29228bc106bSChao Yu unlock_page(page); 29328bc106bSChao Yu list_move_tail(&cur->list, revoke_list); 29488b88a66SJaegeuk Kim } 29529b96b54SChao Yu 29629b96b54SChao Yu if (submit_bio) 29729b96b54SChao Yu f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE); 29828bc106bSChao Yu 29928bc106bSChao Yu if (!err) 30028bc106bSChao Yu __revoke_inmem_pages(inode, revoke_list, false, false); 30128bc106bSChao Yu 30229b96b54SChao Yu return err; 30329b96b54SChao Yu } 30429b96b54SChao Yu 30529b96b54SChao Yu int commit_inmem_pages(struct inode *inode) 30629b96b54SChao Yu { 30729b96b54SChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 30829b96b54SChao Yu struct f2fs_inode_info *fi = F2FS_I(inode); 30928bc106bSChao Yu struct list_head revoke_list; 31028bc106bSChao Yu int err; 31129b96b54SChao Yu 31228bc106bSChao Yu INIT_LIST_HEAD(&revoke_list); 31329b96b54SChao Yu f2fs_balance_fs(sbi, true); 31429b96b54SChao Yu f2fs_lock_op(sbi); 31529b96b54SChao Yu 31629b96b54SChao Yu mutex_lock(&fi->inmem_lock); 31728bc106bSChao Yu err = __commit_inmem_pages(inode, &revoke_list); 31828bc106bSChao Yu if (err) { 31928bc106bSChao Yu int ret; 32028bc106bSChao Yu /* 
32128bc106bSChao Yu * try to revoke all committed pages, but still we could fail 32228bc106bSChao Yu * due to no memory or other reason, if that happened, EAGAIN 32328bc106bSChao Yu * will be returned, which means in such case, transaction is 32428bc106bSChao Yu * already not integrity, caller should use journal to do the 32528bc106bSChao Yu * recovery or rewrite & commit last transaction. For other 32628bc106bSChao Yu * error number, revoking was done by filesystem itself. 32728bc106bSChao Yu */ 32828bc106bSChao Yu ret = __revoke_inmem_pages(inode, &revoke_list, false, true); 32928bc106bSChao Yu if (ret) 33028bc106bSChao Yu err = ret; 33128bc106bSChao Yu 33228bc106bSChao Yu /* drop all uncommitted pages */ 33328bc106bSChao Yu __revoke_inmem_pages(inode, &fi->inmem_pages, true, false); 33428bc106bSChao Yu } 33588b88a66SJaegeuk Kim mutex_unlock(&fi->inmem_lock); 33688b88a66SJaegeuk Kim 33788b88a66SJaegeuk Kim f2fs_unlock_op(sbi); 338edb27deeSJaegeuk Kim return err; 33988b88a66SJaegeuk Kim } 34088b88a66SJaegeuk Kim 3410a8165d7SJaegeuk Kim /* 342351df4b2SJaegeuk Kim * This function balances dirty node and dentry pages. 343351df4b2SJaegeuk Kim * In addition, it controls garbage collection. 344351df4b2SJaegeuk Kim */ 3452c4db1a6SJaegeuk Kim void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need) 346351df4b2SJaegeuk Kim { 3472c4db1a6SJaegeuk Kim if (!need) 3482c4db1a6SJaegeuk Kim return; 349351df4b2SJaegeuk Kim /* 350029cd28cSJaegeuk Kim * We should do GC or end up with checkpoint, if there are so many dirty 351029cd28cSJaegeuk Kim * dir/node pages without enough free segments. 
352351df4b2SJaegeuk Kim */ 35343727527SJaegeuk Kim if (has_not_enough_free_secs(sbi, 0)) { 354351df4b2SJaegeuk Kim mutex_lock(&sbi->gc_mutex); 355d530d4d8SChao Yu f2fs_gc(sbi, false); 356351df4b2SJaegeuk Kim } 357351df4b2SJaegeuk Kim } 358351df4b2SJaegeuk Kim 3594660f9c0SJaegeuk Kim void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) 3604660f9c0SJaegeuk Kim { 3611dcc336bSChao Yu /* try to shrink extent cache when there is no enough memory */ 362554df79eSJaegeuk Kim if (!available_free_memory(sbi, EXTENT_CACHE)) 3631dcc336bSChao Yu f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER); 3641dcc336bSChao Yu 3651b38dc8eSJaegeuk Kim /* check the # of cached NAT entries */ 3661b38dc8eSJaegeuk Kim if (!available_free_memory(sbi, NAT_ENTRIES)) 3671b38dc8eSJaegeuk Kim try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK); 3681b38dc8eSJaegeuk Kim 36931696580SChao Yu if (!available_free_memory(sbi, FREE_NIDS)) 37031696580SChao Yu try_to_free_nids(sbi, NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES); 37131696580SChao Yu 3721b38dc8eSJaegeuk Kim /* checkpoint is the only way to shrink partial cached entries */ 3731b38dc8eSJaegeuk Kim if (!available_free_memory(sbi, NAT_ENTRIES) || 37460b99b48SJaegeuk Kim !available_free_memory(sbi, INO_ENTRIES) || 3757d768d2cSChao Yu excess_prefree_segs(sbi) || 3767d768d2cSChao Yu excess_dirty_nats(sbi) || 377d0239e1bSJaegeuk Kim (is_idle(sbi) && f2fs_time_over(sbi, CP_TIME))) { 378e9f5b8b8SChao Yu if (test_opt(sbi, DATA_FLUSH)) { 379e9f5b8b8SChao Yu struct blk_plug plug; 380e9f5b8b8SChao Yu 381e9f5b8b8SChao Yu blk_start_plug(&plug); 38236b35a0dSChao Yu sync_dirty_inodes(sbi, FILE_INODE); 383e9f5b8b8SChao Yu blk_finish_plug(&plug); 384e9f5b8b8SChao Yu } 3854660f9c0SJaegeuk Kim f2fs_sync_fs(sbi->sb, true); 38642190d2aSJaegeuk Kim stat_inc_bg_cp_count(sbi->stat_info); 3874660f9c0SJaegeuk Kim } 38836b35a0dSChao Yu } 3894660f9c0SJaegeuk Kim 3902163d198SGu Zheng static int issue_flush_thread(void *data) 3916b4afdd7SJaegeuk Kim { 3926b4afdd7SJaegeuk Kim struct 
f2fs_sb_info *sbi = data; 393a688b9d9SGu Zheng struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info; 394a688b9d9SGu Zheng wait_queue_head_t *q = &fcc->flush_wait_queue; 3956b4afdd7SJaegeuk Kim repeat: 3966b4afdd7SJaegeuk Kim if (kthread_should_stop()) 3976b4afdd7SJaegeuk Kim return 0; 3986b4afdd7SJaegeuk Kim 399721bd4d5SGu Zheng if (!llist_empty(&fcc->issue_list)) { 400740432f8SJaegeuk Kim struct bio *bio; 4016b4afdd7SJaegeuk Kim struct flush_cmd *cmd, *next; 4026b4afdd7SJaegeuk Kim int ret; 4036b4afdd7SJaegeuk Kim 404740432f8SJaegeuk Kim bio = f2fs_bio_alloc(0); 405740432f8SJaegeuk Kim 406721bd4d5SGu Zheng fcc->dispatch_list = llist_del_all(&fcc->issue_list); 407721bd4d5SGu Zheng fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list); 408721bd4d5SGu Zheng 4096b4afdd7SJaegeuk Kim bio->bi_bdev = sbi->sb->s_bdev; 410*04d328deSMike Christie bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); 4114e49ea4aSMike Christie ret = submit_bio_wait(bio); 4126b4afdd7SJaegeuk Kim 413721bd4d5SGu Zheng llist_for_each_entry_safe(cmd, next, 414721bd4d5SGu Zheng fcc->dispatch_list, llnode) { 4156b4afdd7SJaegeuk Kim cmd->ret = ret; 4166b4afdd7SJaegeuk Kim complete(&cmd->wait); 4176b4afdd7SJaegeuk Kim } 418a4ed23f2SGu Zheng bio_put(bio); 419a688b9d9SGu Zheng fcc->dispatch_list = NULL; 4206b4afdd7SJaegeuk Kim } 4216b4afdd7SJaegeuk Kim 422a688b9d9SGu Zheng wait_event_interruptible(*q, 423721bd4d5SGu Zheng kthread_should_stop() || !llist_empty(&fcc->issue_list)); 4246b4afdd7SJaegeuk Kim goto repeat; 4256b4afdd7SJaegeuk Kim } 4266b4afdd7SJaegeuk Kim 4276b4afdd7SJaegeuk Kim int f2fs_issue_flush(struct f2fs_sb_info *sbi) 4286b4afdd7SJaegeuk Kim { 429a688b9d9SGu Zheng struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info; 430adf8d90bSChao Yu struct flush_cmd cmd; 4316b4afdd7SJaegeuk Kim 43224a9ee0fSJaegeuk Kim trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER), 43324a9ee0fSJaegeuk Kim test_opt(sbi, FLUSH_MERGE)); 43424a9ee0fSJaegeuk Kim 4350f7b2abdSJaegeuk Kim if 
(test_opt(sbi, NOBARRIER)) 4360f7b2abdSJaegeuk Kim return 0; 4370f7b2abdSJaegeuk Kim 438740432f8SJaegeuk Kim if (!test_opt(sbi, FLUSH_MERGE)) { 439740432f8SJaegeuk Kim struct bio *bio = f2fs_bio_alloc(0); 440740432f8SJaegeuk Kim int ret; 441740432f8SJaegeuk Kim 442740432f8SJaegeuk Kim bio->bi_bdev = sbi->sb->s_bdev; 443*04d328deSMike Christie bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); 4444e49ea4aSMike Christie ret = submit_bio_wait(bio); 445740432f8SJaegeuk Kim bio_put(bio); 446740432f8SJaegeuk Kim return ret; 447740432f8SJaegeuk Kim } 4486b4afdd7SJaegeuk Kim 449adf8d90bSChao Yu init_completion(&cmd.wait); 4506b4afdd7SJaegeuk Kim 451721bd4d5SGu Zheng llist_add(&cmd.llnode, &fcc->issue_list); 4526b4afdd7SJaegeuk Kim 453a688b9d9SGu Zheng if (!fcc->dispatch_list) 454a688b9d9SGu Zheng wake_up(&fcc->flush_wait_queue); 4556b4afdd7SJaegeuk Kim 456adf8d90bSChao Yu wait_for_completion(&cmd.wait); 457adf8d90bSChao Yu 458adf8d90bSChao Yu return cmd.ret; 4596b4afdd7SJaegeuk Kim } 4606b4afdd7SJaegeuk Kim 4612163d198SGu Zheng int create_flush_cmd_control(struct f2fs_sb_info *sbi) 4622163d198SGu Zheng { 4632163d198SGu Zheng dev_t dev = sbi->sb->s_bdev->bd_dev; 4642163d198SGu Zheng struct flush_cmd_control *fcc; 4652163d198SGu Zheng int err = 0; 4662163d198SGu Zheng 4672163d198SGu Zheng fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL); 4682163d198SGu Zheng if (!fcc) 4692163d198SGu Zheng return -ENOMEM; 4702163d198SGu Zheng init_waitqueue_head(&fcc->flush_wait_queue); 471721bd4d5SGu Zheng init_llist_head(&fcc->issue_list); 4726b2920a5SChao Yu SM_I(sbi)->cmd_control_info = fcc; 4732163d198SGu Zheng fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi, 4742163d198SGu Zheng "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev)); 4752163d198SGu Zheng if (IS_ERR(fcc->f2fs_issue_flush)) { 4762163d198SGu Zheng err = PTR_ERR(fcc->f2fs_issue_flush); 4772163d198SGu Zheng kfree(fcc); 4786b2920a5SChao Yu SM_I(sbi)->cmd_control_info = NULL; 4792163d198SGu Zheng return err; 
4802163d198SGu Zheng } 4812163d198SGu Zheng 4822163d198SGu Zheng return err; 4832163d198SGu Zheng } 4842163d198SGu Zheng 4852163d198SGu Zheng void destroy_flush_cmd_control(struct f2fs_sb_info *sbi) 4862163d198SGu Zheng { 4876b2920a5SChao Yu struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info; 4882163d198SGu Zheng 4892163d198SGu Zheng if (fcc && fcc->f2fs_issue_flush) 4902163d198SGu Zheng kthread_stop(fcc->f2fs_issue_flush); 4912163d198SGu Zheng kfree(fcc); 4926b2920a5SChao Yu SM_I(sbi)->cmd_control_info = NULL; 4932163d198SGu Zheng } 4942163d198SGu Zheng 495351df4b2SJaegeuk Kim static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, 496351df4b2SJaegeuk Kim enum dirty_type dirty_type) 497351df4b2SJaegeuk Kim { 498351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 499351df4b2SJaegeuk Kim 500351df4b2SJaegeuk Kim /* need not be added */ 501351df4b2SJaegeuk Kim if (IS_CURSEG(sbi, segno)) 502351df4b2SJaegeuk Kim return; 503351df4b2SJaegeuk Kim 504351df4b2SJaegeuk Kim if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type])) 505351df4b2SJaegeuk Kim dirty_i->nr_dirty[dirty_type]++; 506351df4b2SJaegeuk Kim 507351df4b2SJaegeuk Kim if (dirty_type == DIRTY) { 508351df4b2SJaegeuk Kim struct seg_entry *sentry = get_seg_entry(sbi, segno); 5094625d6aaSChangman Lee enum dirty_type t = sentry->type; 510b2f2c390SJaegeuk Kim 511ec325b52SJaegeuk Kim if (unlikely(t >= DIRTY)) { 512ec325b52SJaegeuk Kim f2fs_bug_on(sbi, 1); 513ec325b52SJaegeuk Kim return; 514ec325b52SJaegeuk Kim } 5154625d6aaSChangman Lee if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t])) 5164625d6aaSChangman Lee dirty_i->nr_dirty[t]++; 517351df4b2SJaegeuk Kim } 518351df4b2SJaegeuk Kim } 519351df4b2SJaegeuk Kim 520351df4b2SJaegeuk Kim static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, 521351df4b2SJaegeuk Kim enum dirty_type dirty_type) 522351df4b2SJaegeuk Kim { 523351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = 
DIRTY_I(sbi); 524351df4b2SJaegeuk Kim 525351df4b2SJaegeuk Kim if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type])) 526351df4b2SJaegeuk Kim dirty_i->nr_dirty[dirty_type]--; 527351df4b2SJaegeuk Kim 528351df4b2SJaegeuk Kim if (dirty_type == DIRTY) { 5294625d6aaSChangman Lee struct seg_entry *sentry = get_seg_entry(sbi, segno); 5304625d6aaSChangman Lee enum dirty_type t = sentry->type; 531b2f2c390SJaegeuk Kim 5324625d6aaSChangman Lee if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t])) 533b2f2c390SJaegeuk Kim dirty_i->nr_dirty[t]--; 534b2f2c390SJaegeuk Kim 5355ec4e49fSJaegeuk Kim if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0) 5365ec4e49fSJaegeuk Kim clear_bit(GET_SECNO(sbi, segno), 5375ec4e49fSJaegeuk Kim dirty_i->victim_secmap); 538351df4b2SJaegeuk Kim } 539351df4b2SJaegeuk Kim } 540351df4b2SJaegeuk Kim 5410a8165d7SJaegeuk Kim /* 542351df4b2SJaegeuk Kim * Should not occur error such as -ENOMEM. 543351df4b2SJaegeuk Kim * Adding dirty entry into seglist is not critical operation. 544351df4b2SJaegeuk Kim * If a given segment is one of current working segments, it won't be added. 
545351df4b2SJaegeuk Kim */ 5468d8451afSHaicheng Li static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) 547351df4b2SJaegeuk Kim { 548351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 549351df4b2SJaegeuk Kim unsigned short valid_blocks; 550351df4b2SJaegeuk Kim 551351df4b2SJaegeuk Kim if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno)) 552351df4b2SJaegeuk Kim return; 553351df4b2SJaegeuk Kim 554351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock); 555351df4b2SJaegeuk Kim 556351df4b2SJaegeuk Kim valid_blocks = get_valid_blocks(sbi, segno, 0); 557351df4b2SJaegeuk Kim 558351df4b2SJaegeuk Kim if (valid_blocks == 0) { 559351df4b2SJaegeuk Kim __locate_dirty_segment(sbi, segno, PRE); 560351df4b2SJaegeuk Kim __remove_dirty_segment(sbi, segno, DIRTY); 561351df4b2SJaegeuk Kim } else if (valid_blocks < sbi->blocks_per_seg) { 562351df4b2SJaegeuk Kim __locate_dirty_segment(sbi, segno, DIRTY); 563351df4b2SJaegeuk Kim } else { 564351df4b2SJaegeuk Kim /* Recovery routine with SSR needs this */ 565351df4b2SJaegeuk Kim __remove_dirty_segment(sbi, segno, DIRTY); 566351df4b2SJaegeuk Kim } 567351df4b2SJaegeuk Kim 568351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock); 569351df4b2SJaegeuk Kim } 570351df4b2SJaegeuk Kim 5711e87a78dSJaegeuk Kim static int f2fs_issue_discard(struct f2fs_sb_info *sbi, 57237208879SJaegeuk Kim block_t blkstart, block_t blklen) 57337208879SJaegeuk Kim { 57455cf9cb6SChao Yu sector_t start = SECTOR_FROM_BLOCK(blkstart); 57555cf9cb6SChao Yu sector_t len = SECTOR_FROM_BLOCK(blklen); 576a66cdd98SJaegeuk Kim struct seg_entry *se; 577a66cdd98SJaegeuk Kim unsigned int offset; 578a66cdd98SJaegeuk Kim block_t i; 579a66cdd98SJaegeuk Kim 580a66cdd98SJaegeuk Kim for (i = blkstart; i < blkstart + blklen; i++) { 581a66cdd98SJaegeuk Kim se = get_seg_entry(sbi, GET_SEGNO(sbi, i)); 582a66cdd98SJaegeuk Kim offset = GET_BLKOFF_FROM_SEG0(sbi, i); 583a66cdd98SJaegeuk Kim 584a66cdd98SJaegeuk Kim if (!f2fs_test_and_set_bit(offset, 
se->discard_map)) 585a66cdd98SJaegeuk Kim sbi->discard_blks--; 586a66cdd98SJaegeuk Kim } 5871661d07cSJaegeuk Kim trace_f2fs_issue_discard(sbi->sb, blkstart, blklen); 5881e87a78dSJaegeuk Kim return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0); 5891e87a78dSJaegeuk Kim } 5901e87a78dSJaegeuk Kim 591e90c2d28SChao Yu bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr) 5921e87a78dSJaegeuk Kim { 59360b286c4SJaegeuk Kim int err = -EOPNOTSUPP; 59440a02be1SJaegeuk Kim 59540a02be1SJaegeuk Kim if (test_opt(sbi, DISCARD)) { 59640a02be1SJaegeuk Kim struct seg_entry *se = get_seg_entry(sbi, 59740a02be1SJaegeuk Kim GET_SEGNO(sbi, blkaddr)); 59840a02be1SJaegeuk Kim unsigned int offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); 59940a02be1SJaegeuk Kim 60040a02be1SJaegeuk Kim if (f2fs_test_bit(offset, se->discard_map)) 601e90c2d28SChao Yu return false; 60240a02be1SJaegeuk Kim 60340a02be1SJaegeuk Kim err = f2fs_issue_discard(sbi, blkaddr, 1); 60440a02be1SJaegeuk Kim } 60540a02be1SJaegeuk Kim 606e90c2d28SChao Yu if (err) { 607381722d2SChao Yu update_meta_page(sbi, NULL, blkaddr); 608e90c2d28SChao Yu return true; 609e90c2d28SChao Yu } 610e90c2d28SChao Yu return false; 61137208879SJaegeuk Kim } 61237208879SJaegeuk Kim 613adf4983bSJaegeuk Kim static void __add_discard_entry(struct f2fs_sb_info *sbi, 614a66cdd98SJaegeuk Kim struct cp_control *cpc, struct seg_entry *se, 615a66cdd98SJaegeuk Kim unsigned int start, unsigned int end) 616b2955550SJaegeuk Kim { 617b2955550SJaegeuk Kim struct list_head *head = &SM_I(sbi)->discard_list; 618adf4983bSJaegeuk Kim struct discard_entry *new, *last; 619adf4983bSJaegeuk Kim 620adf4983bSJaegeuk Kim if (!list_empty(head)) { 621adf4983bSJaegeuk Kim last = list_last_entry(head, struct discard_entry, list); 622adf4983bSJaegeuk Kim if (START_BLOCK(sbi, cpc->trim_start) + start == 623adf4983bSJaegeuk Kim last->blkaddr + last->len) { 624adf4983bSJaegeuk Kim last->len += end - start; 625adf4983bSJaegeuk Kim goto done; 
626adf4983bSJaegeuk Kim } 627adf4983bSJaegeuk Kim } 628adf4983bSJaegeuk Kim 629adf4983bSJaegeuk Kim new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS); 630adf4983bSJaegeuk Kim INIT_LIST_HEAD(&new->list); 631adf4983bSJaegeuk Kim new->blkaddr = START_BLOCK(sbi, cpc->trim_start) + start; 632adf4983bSJaegeuk Kim new->len = end - start; 633adf4983bSJaegeuk Kim list_add_tail(&new->list, head); 634adf4983bSJaegeuk Kim done: 635adf4983bSJaegeuk Kim SM_I(sbi)->nr_discards += end - start; 636adf4983bSJaegeuk Kim } 637adf4983bSJaegeuk Kim 638adf4983bSJaegeuk Kim static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc) 639adf4983bSJaegeuk Kim { 640b2955550SJaegeuk Kim int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); 641b2955550SJaegeuk Kim int max_blocks = sbi->blocks_per_seg; 6424b2fecc8SJaegeuk Kim struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start); 643b2955550SJaegeuk Kim unsigned long *cur_map = (unsigned long *)se->cur_valid_map; 644b2955550SJaegeuk Kim unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; 645a66cdd98SJaegeuk Kim unsigned long *discard_map = (unsigned long *)se->discard_map; 64660a3b782SJaegeuk Kim unsigned long *dmap = SIT_I(sbi)->tmp_map; 647b2955550SJaegeuk Kim unsigned int start = 0, end = -1; 6484b2fecc8SJaegeuk Kim bool force = (cpc->reason == CP_DISCARD); 649b2955550SJaegeuk Kim int i; 650b2955550SJaegeuk Kim 651a66cdd98SJaegeuk Kim if (se->valid_blocks == max_blocks) 652b2955550SJaegeuk Kim return; 653b2955550SJaegeuk Kim 654a66cdd98SJaegeuk Kim if (!force) { 655a66cdd98SJaegeuk Kim if (!test_opt(sbi, DISCARD) || !se->valid_blocks || 656a66cdd98SJaegeuk Kim SM_I(sbi)->nr_discards >= SM_I(sbi)->max_discards) 6574b2fecc8SJaegeuk Kim return; 6584b2fecc8SJaegeuk Kim } 659b2955550SJaegeuk Kim 660b2955550SJaegeuk Kim /* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */ 661b2955550SJaegeuk Kim for (i = 0; i < entries; i++) 662a66cdd98SJaegeuk Kim dmap[i] = force ? 
~ckpt_map[i] & ~discard_map[i] : 663d7bc2484SJaegeuk Kim (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i]; 664b2955550SJaegeuk Kim 6654b2fecc8SJaegeuk Kim while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) { 666b2955550SJaegeuk Kim start = __find_rev_next_bit(dmap, max_blocks, end + 1); 667b2955550SJaegeuk Kim if (start >= max_blocks) 668b2955550SJaegeuk Kim break; 669b2955550SJaegeuk Kim 670b2955550SJaegeuk Kim end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1); 671a66cdd98SJaegeuk Kim __add_discard_entry(sbi, cpc, se, start, end); 672b2955550SJaegeuk Kim } 673b2955550SJaegeuk Kim } 674b2955550SJaegeuk Kim 6754b2fecc8SJaegeuk Kim /* (review) drop every queued small-discard entry without issuing it */ void release_discard_addrs(struct f2fs_sb_info *sbi) 6764b2fecc8SJaegeuk Kim { 6774b2fecc8SJaegeuk Kim struct list_head *head = &(SM_I(sbi)->discard_list); 6784b2fecc8SJaegeuk Kim struct discard_entry *entry, *this; 6794b2fecc8SJaegeuk Kim 6804b2fecc8SJaegeuk Kim /* drop caches */ 6814b2fecc8SJaegeuk Kim list_for_each_entry_safe(entry, this, head, list) { 6824b2fecc8SJaegeuk Kim list_del(&entry->list); 6834b2fecc8SJaegeuk Kim kmem_cache_free(discard_entry_slab, entry); 6844b2fecc8SJaegeuk Kim } 6854b2fecc8SJaegeuk Kim } 6864b2fecc8SJaegeuk Kim 6870a8165d7SJaegeuk Kim /* 688351df4b2SJaegeuk Kim * Should call clear_prefree_segments after checkpoint is done.
689351df4b2SJaegeuk Kim */ 690351df4b2SJaegeuk Kim /* (review) return all PRE-state segments to the free segmap, under seglist_lock */ static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) 691351df4b2SJaegeuk Kim { 692351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 693b65ee148SChao Yu unsigned int segno; 694351df4b2SJaegeuk Kim 695351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock); 6967cd8558bSJaegeuk Kim for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi)) 697351df4b2SJaegeuk Kim __set_test_and_free(sbi, segno); 698351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock); 699351df4b2SJaegeuk Kim } 700351df4b2SJaegeuk Kim 701836b5a63SJaegeuk Kim /* (review) clear prefree bits; issue a segment-granular discard per extent when DISCARD is set, then flush the small-discard list, skipping entries shorter than cpc->trim_minlen during CP_DISCARD */ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc) 702351df4b2SJaegeuk Kim { 703b2955550SJaegeuk Kim struct list_head *head = &(SM_I(sbi)->discard_list); 7042d7b822aSChao Yu struct discard_entry *entry, *this; 705351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 70629e59c14SChangman Lee unsigned long *prefree_map = dirty_i->dirty_segmap[PRE]; 70729e59c14SChangman Lee unsigned int start = 0, end = -1; 708351df4b2SJaegeuk Kim 709351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock); 71029e59c14SChangman Lee 711351df4b2SJaegeuk Kim while (1) { 71229e59c14SChangman Lee int i; 7137cd8558bSJaegeuk Kim start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1); 7147cd8558bSJaegeuk Kim if (start >= MAIN_SEGS(sbi)) 715351df4b2SJaegeuk Kim break; 7167cd8558bSJaegeuk Kim end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi), 7177cd8558bSJaegeuk Kim start + 1); 718351df4b2SJaegeuk Kim 71929e59c14SChangman Lee for (i = start; i < end; i++) 72029e59c14SChangman Lee clear_bit(i, prefree_map); 721351df4b2SJaegeuk Kim 72229e59c14SChangman Lee dirty_i->nr_dirty[PRE] -= end - start; 72329e59c14SChangman Lee 72429e59c14SChangman Lee if (!test_opt(sbi, DISCARD)) 72529e59c14SChangman Lee continue; 72629e59c14SChangman Lee 72737208879SJaegeuk Kim f2fs_issue_discard(sbi, START_BLOCK(sbi, start), 72837208879SJaegeuk Kim
(end - start) << sbi->log_blocks_per_seg); 729351df4b2SJaegeuk Kim } 730351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock); 731b2955550SJaegeuk Kim 732b2955550SJaegeuk Kim /* send small discards */ 7332d7b822aSChao Yu list_for_each_entry_safe(entry, this, head, list) { 734836b5a63SJaegeuk Kim if (cpc->reason == CP_DISCARD && entry->len < cpc->trim_minlen) 735836b5a63SJaegeuk Kim goto skip; 73637208879SJaegeuk Kim f2fs_issue_discard(sbi, entry->blkaddr, entry->len); 737f56aa1c5SJaegeuk Kim cpc->trimmed += entry->len; 738836b5a63SJaegeuk Kim skip: 739b2955550SJaegeuk Kim list_del(&entry->list); 740b2955550SJaegeuk Kim SM_I(sbi)->nr_discards -= entry->len; 741b2955550SJaegeuk Kim kmem_cache_free(discard_entry_slab, entry); 742b2955550SJaegeuk Kim } 743351df4b2SJaegeuk Kim } 744351df4b2SJaegeuk Kim 745184a5cd2SChao Yu /* (review) returns true if the SIT entry was already dirty; bumps dirty_sentries on first dirtying */ static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno) 746351df4b2SJaegeuk Kim { 747351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 748184a5cd2SChao Yu 749184a5cd2SChao Yu if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) { 750351df4b2SJaegeuk Kim sit_i->dirty_sentries++; 751184a5cd2SChao Yu return false; 752184a5cd2SChao Yu } 753184a5cd2SChao Yu 754184a5cd2SChao Yu return true; 755351df4b2SJaegeuk Kim } 756351df4b2SJaegeuk Kim 757351df4b2SJaegeuk Kim /* (review) record the curseg type in the in-core SIT entry; optionally mark it dirty */ static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type, 758351df4b2SJaegeuk Kim unsigned int segno, int modified) 759351df4b2SJaegeuk Kim { 760351df4b2SJaegeuk Kim struct seg_entry *se = get_seg_entry(sbi, segno); 761351df4b2SJaegeuk Kim se->type = type; 762351df4b2SJaegeuk Kim if (modified) 763351df4b2SJaegeuk Kim __mark_sit_entry_dirty(sbi, segno); 764351df4b2SJaegeuk Kim } 765351df4b2SJaegeuk Kim 766351df4b2SJaegeuk Kim /* (review) apply a +1/-1 valid-block delta at blkaddr: updates cur_valid_map, discard_map, ckpt_valid_blocks, mtime and section counts, then dirties the SIT entry */ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) 767351df4b2SJaegeuk Kim { 768351df4b2SJaegeuk Kim struct seg_entry *se; 769351df4b2SJaegeuk Kim unsigned int segno, offset;
770351df4b2SJaegeuk Kim long int new_vblocks; 771351df4b2SJaegeuk Kim 772351df4b2SJaegeuk Kim segno = GET_SEGNO(sbi, blkaddr); 773351df4b2SJaegeuk Kim 774351df4b2SJaegeuk Kim se = get_seg_entry(sbi, segno); 775351df4b2SJaegeuk Kim new_vblocks = se->valid_blocks + del; 776491c0854SJaegeuk Kim offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); 777351df4b2SJaegeuk Kim 7789850cf4aSJaegeuk Kim f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) || 779351df4b2SJaegeuk Kim (new_vblocks > sbi->blocks_per_seg))); 780351df4b2SJaegeuk Kim 781351df4b2SJaegeuk Kim se->valid_blocks = new_vblocks; 782351df4b2SJaegeuk Kim se->mtime = get_mtime(sbi); 783351df4b2SJaegeuk Kim SIT_I(sbi)->max_mtime = se->mtime; 784351df4b2SJaegeuk Kim 785351df4b2SJaegeuk Kim /* Update valid block bitmap */ 786351df4b2SJaegeuk Kim if (del > 0) { 78752aca074SGu Zheng if (f2fs_test_and_set_bit(offset, se->cur_valid_map)) 78805796763SJaegeuk Kim f2fs_bug_on(sbi, 1); 789a66cdd98SJaegeuk Kim if (!f2fs_test_and_set_bit(offset, se->discard_map)) 790a66cdd98SJaegeuk Kim sbi->discard_blks--; 791351df4b2SJaegeuk Kim } else { 79252aca074SGu Zheng if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) 79305796763SJaegeuk Kim f2fs_bug_on(sbi, 1); 794a66cdd98SJaegeuk Kim if (f2fs_test_and_clear_bit(offset, se->discard_map)) 795a66cdd98SJaegeuk Kim sbi->discard_blks++; 796351df4b2SJaegeuk Kim } 797351df4b2SJaegeuk Kim if (!f2fs_test_bit(offset, se->ckpt_valid_map)) 798351df4b2SJaegeuk Kim se->ckpt_valid_blocks += del; 799351df4b2SJaegeuk Kim 800351df4b2SJaegeuk Kim __mark_sit_entry_dirty(sbi, segno); 801351df4b2SJaegeuk Kim 802351df4b2SJaegeuk Kim /* update total number of valid blocks to be written in ckpt area */ 803351df4b2SJaegeuk Kim SIT_I(sbi)->written_valid_blocks += del; 804351df4b2SJaegeuk Kim 805351df4b2SJaegeuk Kim if (sbi->segs_per_sec > 1) 806351df4b2SJaegeuk Kim get_sec_entry(sbi, segno)->valid_blocks += del; 807351df4b2SJaegeuk Kim } 808351df4b2SJaegeuk Kim 8095e443818SJaegeuk Kim /* (review) account new as valid and old as invalid (unless NULL_SEGNO), then re-bucket both segments in the dirty seglist */ void
refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new) 810351df4b2SJaegeuk Kim { 8115e443818SJaegeuk Kim update_sit_entry(sbi, new, 1); 8125e443818SJaegeuk Kim if (GET_SEGNO(sbi, old) != NULL_SEGNO) 8135e443818SJaegeuk Kim update_sit_entry(sbi, old, -1); 8145e443818SJaegeuk Kim 8155e443818SJaegeuk Kim locate_dirty_segment(sbi, GET_SEGNO(sbi, old)); 8165e443818SJaegeuk Kim locate_dirty_segment(sbi, GET_SEGNO(sbi, new)); 817351df4b2SJaegeuk Kim } 818351df4b2SJaegeuk Kim 819351df4b2SJaegeuk Kim /* (review) invalidate one block under sentry_lock; NEW_ADDR is a silent no-op, NULL_ADDR is a bug */ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) 820351df4b2SJaegeuk Kim { 821351df4b2SJaegeuk Kim unsigned int segno = GET_SEGNO(sbi, addr); 822351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 823351df4b2SJaegeuk Kim 8249850cf4aSJaegeuk Kim f2fs_bug_on(sbi, addr == NULL_ADDR); 825351df4b2SJaegeuk Kim if (addr == NEW_ADDR) 826351df4b2SJaegeuk Kim return; 827351df4b2SJaegeuk Kim 828351df4b2SJaegeuk Kim /* add it into sit main buffer */ 829351df4b2SJaegeuk Kim mutex_lock(&sit_i->sentry_lock); 830351df4b2SJaegeuk Kim 831351df4b2SJaegeuk Kim update_sit_entry(sbi, addr, -1); 832351df4b2SJaegeuk Kim 833351df4b2SJaegeuk Kim /* add it into dirty seglist */ 834351df4b2SJaegeuk Kim locate_dirty_segment(sbi, segno); 835351df4b2SJaegeuk Kim 836351df4b2SJaegeuk Kim mutex_unlock(&sit_i->sentry_lock); 837351df4b2SJaegeuk Kim } 838351df4b2SJaegeuk Kim 8396e2c64adSJaegeuk Kim /* (review) true if blkaddr is marked valid in the last checkpoint's SIT bitmap (NEW/NULL addresses count as checkpointed) */ bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr) 8406e2c64adSJaegeuk Kim { 8416e2c64adSJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 8426e2c64adSJaegeuk Kim unsigned int segno, offset; 8436e2c64adSJaegeuk Kim struct seg_entry *se; 8446e2c64adSJaegeuk Kim bool is_cp = false; 8456e2c64adSJaegeuk Kim 8466e2c64adSJaegeuk Kim if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) 8476e2c64adSJaegeuk Kim return true; 8486e2c64adSJaegeuk Kim 8496e2c64adSJaegeuk Kim mutex_lock(&sit_i->sentry_lock); 8506e2c64adSJaegeuk Kim 8516e2c64adSJaegeuk Kim segno = GET_SEGNO(sbi,
blkaddr); 8526e2c64adSJaegeuk Kim se = get_seg_entry(sbi, segno); 8536e2c64adSJaegeuk Kim offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); 8546e2c64adSJaegeuk Kim 8556e2c64adSJaegeuk Kim if (f2fs_test_bit(offset, se->ckpt_valid_map)) 8566e2c64adSJaegeuk Kim is_cp = true; 8576e2c64adSJaegeuk Kim 8586e2c64adSJaegeuk Kim mutex_unlock(&sit_i->sentry_lock); 8596e2c64adSJaegeuk Kim 8606e2c64adSJaegeuk Kim return is_cp; 8616e2c64adSJaegeuk Kim } 8626e2c64adSJaegeuk Kim 8630a8165d7SJaegeuk Kim /* 864351df4b2SJaegeuk Kim * This function should be resided under the curseg_mutex lock 865351df4b2SJaegeuk Kim */ 866351df4b2SJaegeuk Kim /* (review) copy one summary into the curseg's in-core summary block at next_blkoff */ static void __add_sum_entry(struct f2fs_sb_info *sbi, int type, 867e79efe3bSHaicheng Li struct f2fs_summary *sum) 868351df4b2SJaegeuk Kim { 869351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 870351df4b2SJaegeuk Kim void *addr = curseg->sum_blk; 871e79efe3bSHaicheng Li addr += curseg->next_blkoff * sizeof(struct f2fs_summary); 872351df4b2SJaegeuk Kim memcpy(addr, sum, sizeof(struct f2fs_summary)); 873351df4b2SJaegeuk Kim } 874351df4b2SJaegeuk Kim 8750a8165d7SJaegeuk Kim /* 876351df4b2SJaegeuk Kim * Calculate the number of current summary pages for writing 877351df4b2SJaegeuk Kim */ 8783fa06d7bSChao Yu int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra) 879351df4b2SJaegeuk Kim { 880351df4b2SJaegeuk Kim int valid_sum_count = 0; 8819a47938bSFan Li int i, sum_in_page; 882351df4b2SJaegeuk Kim 883351df4b2SJaegeuk Kim for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 884351df4b2SJaegeuk Kim if (sbi->ckpt->alloc_type[i] == SSR) 885351df4b2SJaegeuk Kim valid_sum_count += sbi->blocks_per_seg; 8863fa06d7bSChao Yu else { 8873fa06d7bSChao Yu if (for_ra) 8883fa06d7bSChao Yu valid_sum_count += le16_to_cpu( 8893fa06d7bSChao Yu F2FS_CKPT(sbi)->cur_data_blkoff[i]); 890351df4b2SJaegeuk Kim else 891351df4b2SJaegeuk Kim valid_sum_count += curseg_blkoff(sbi, i); 892351df4b2SJaegeuk Kim } 8933fa06d7bSChao Yu } 894351df4b2SJaegeuk
Kim 89509cbfeafSKirill A. Shutemov sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE - 8969a47938bSFan Li SUM_FOOTER_SIZE) / SUMMARY_SIZE; 8979a47938bSFan Li if (valid_sum_count <= sum_in_page) 898351df4b2SJaegeuk Kim return 1; 8999a47938bSFan Li else if ((valid_sum_count - sum_in_page) <= 90009cbfeafSKirill A. Shutemov (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE) 901351df4b2SJaegeuk Kim return 2; 902351df4b2SJaegeuk Kim return 3; 903351df4b2SJaegeuk Kim } 904351df4b2SJaegeuk Kim 9050a8165d7SJaegeuk Kim /* 906351df4b2SJaegeuk Kim * Caller should put this summary page 907351df4b2SJaegeuk Kim */ 908351df4b2SJaegeuk Kim struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) 909351df4b2SJaegeuk Kim { 910351df4b2SJaegeuk Kim return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno)); 911351df4b2SJaegeuk Kim } 912351df4b2SJaegeuk Kim 913381722d2SChao Yu /* (review) copy src (or zeros when src is NULL) into the meta page at blk_addr, dirty it and release it */ void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr) 914381722d2SChao Yu { 915381722d2SChao Yu struct page *page = grab_meta_page(sbi, blk_addr); 916381722d2SChao Yu void *dst = page_address(page); 917381722d2SChao Yu 918381722d2SChao Yu if (src) 91909cbfeafSKirill A. Shutemov memcpy(dst, src, PAGE_SIZE); 920381722d2SChao Yu else 92109cbfeafSKirill A.
Shutemov memset(dst, 0, PAGE_SIZE); 922381722d2SChao Yu set_page_dirty(page); 923381722d2SChao Yu f2fs_put_page(page, 1); 924381722d2SChao Yu } 925381722d2SChao Yu 926351df4b2SJaegeuk Kim /* (review) thin wrapper: persist a whole summary block via update_meta_page */ static void write_sum_page(struct f2fs_sb_info *sbi, 927351df4b2SJaegeuk Kim struct f2fs_summary_block *sum_blk, block_t blk_addr) 928351df4b2SJaegeuk Kim { 929381722d2SChao Yu update_meta_page(sbi, (void *)sum_blk, blk_addr); 930351df4b2SJaegeuk Kim } 931351df4b2SJaegeuk Kim 932b7ad7512SChao Yu /* (review) snapshot the curseg's journal (under journal_rwsem) plus its entries and footer into the meta page at blk_addr */ static void write_current_sum_page(struct f2fs_sb_info *sbi, 933b7ad7512SChao Yu int type, block_t blk_addr) 934b7ad7512SChao Yu { 935b7ad7512SChao Yu struct curseg_info *curseg = CURSEG_I(sbi, type); 936b7ad7512SChao Yu struct page *page = grab_meta_page(sbi, blk_addr); 937b7ad7512SChao Yu struct f2fs_summary_block *src = curseg->sum_blk; 938b7ad7512SChao Yu struct f2fs_summary_block *dst; 939b7ad7512SChao Yu 940b7ad7512SChao Yu dst = (struct f2fs_summary_block *)page_address(page); 941b7ad7512SChao Yu 942b7ad7512SChao Yu mutex_lock(&curseg->curseg_mutex); 943b7ad7512SChao Yu 944b7ad7512SChao Yu down_read(&curseg->journal_rwsem); 945b7ad7512SChao Yu memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE); 946b7ad7512SChao Yu up_read(&curseg->journal_rwsem); 947b7ad7512SChao Yu 948b7ad7512SChao Yu memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE); 949b7ad7512SChao Yu memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE); 950b7ad7512SChao Yu 951b7ad7512SChao Yu mutex_unlock(&curseg->curseg_mutex); 952b7ad7512SChao Yu 953b7ad7512SChao Yu set_page_dirty(page); 954b7ad7512SChao Yu f2fs_put_page(page, 1); 955b7ad7512SChao Yu } 956b7ad7512SChao Yu 95760374688SJaegeuk Kim /* (review) true if the next segment exists in the same section and is free */ static int is_next_segment_free(struct f2fs_sb_info *sbi, int type) 95860374688SJaegeuk Kim { 95960374688SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 96081fb5e87SHaicheng Li unsigned int segno = curseg->segno + 1; 96160374688SJaegeuk Kim struct free_segmap_info *free_i = FREE_I(sbi); 96260374688SJaegeuk Kim
9637cd8558bSJaegeuk Kim if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec) 96481fb5e87SHaicheng Li return !test_bit(segno, free_i->free_segmap); 96560374688SJaegeuk Kim return 0; 96660374688SJaegeuk Kim } 96760374688SJaegeuk Kim 9680a8165d7SJaegeuk Kim /* 969351df4b2SJaegeuk Kim * Find a new segment from the free segments bitmap to right order 970351df4b2SJaegeuk Kim * This function should be returned with success, otherwise BUG 971351df4b2SJaegeuk Kim */ 972351df4b2SJaegeuk Kim static void get_new_segment(struct f2fs_sb_info *sbi, 973351df4b2SJaegeuk Kim unsigned int *newseg, bool new_sec, int dir) 974351df4b2SJaegeuk Kim { 975351df4b2SJaegeuk Kim struct free_segmap_info *free_i = FREE_I(sbi); 976351df4b2SJaegeuk Kim unsigned int segno, secno, zoneno; 9777cd8558bSJaegeuk Kim unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone; 978351df4b2SJaegeuk Kim unsigned int hint = *newseg / sbi->segs_per_sec; 979351df4b2SJaegeuk Kim unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg); 980351df4b2SJaegeuk Kim unsigned int left_start = hint; 981351df4b2SJaegeuk Kim bool init = true; 982351df4b2SJaegeuk Kim int go_left = 0; 983351df4b2SJaegeuk Kim int i; 984351df4b2SJaegeuk Kim 9851a118ccfSChao Yu spin_lock(&free_i->segmap_lock); 986351df4b2SJaegeuk Kim 987351df4b2SJaegeuk Kim if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) { 988351df4b2SJaegeuk Kim segno = find_next_zero_bit(free_i->free_segmap, 9890ab14356SChao Yu (hint + 1) * sbi->segs_per_sec, *newseg + 1); 9900ab14356SChao Yu if (segno < (hint + 1) * sbi->segs_per_sec) 991351df4b2SJaegeuk Kim goto got_it; 992351df4b2SJaegeuk Kim } 993351df4b2SJaegeuk Kim find_other_zone: 9947cd8558bSJaegeuk Kim secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint); 9957cd8558bSJaegeuk Kim if (secno >= MAIN_SECS(sbi)) { 996351df4b2SJaegeuk Kim if (dir == ALLOC_RIGHT) { 997351df4b2SJaegeuk Kim secno = find_next_zero_bit(free_i->free_secmap, 9987cd8558bSJaegeuk Kim MAIN_SECS(sbi), 0);
9997cd8558bSJaegeuk Kim f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi)); 1000351df4b2SJaegeuk Kim } else { 1001351df4b2SJaegeuk Kim go_left = 1; 1002351df4b2SJaegeuk Kim left_start = hint - 1; 1003351df4b2SJaegeuk Kim } 1004351df4b2SJaegeuk Kim } 1005351df4b2SJaegeuk Kim if (go_left == 0) 1006351df4b2SJaegeuk Kim goto skip_left; 1007351df4b2SJaegeuk Kim 1008351df4b2SJaegeuk Kim while (test_bit(left_start, free_i->free_secmap)) { 1009351df4b2SJaegeuk Kim if (left_start > 0) { 1010351df4b2SJaegeuk Kim left_start--; 1011351df4b2SJaegeuk Kim continue; 1012351df4b2SJaegeuk Kim } 1013351df4b2SJaegeuk Kim left_start = find_next_zero_bit(free_i->free_secmap, 10147cd8558bSJaegeuk Kim MAIN_SECS(sbi), 0); 10157cd8558bSJaegeuk Kim f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi)); 1016351df4b2SJaegeuk Kim break; 1017351df4b2SJaegeuk Kim } 1018351df4b2SJaegeuk Kim secno = left_start; 1019351df4b2SJaegeuk Kim skip_left: 1020351df4b2SJaegeuk Kim hint = secno; 1021351df4b2SJaegeuk Kim segno = secno * sbi->segs_per_sec; 1022351df4b2SJaegeuk Kim zoneno = secno / sbi->secs_per_zone; 1023351df4b2SJaegeuk Kim 1024351df4b2SJaegeuk Kim /* give up on finding another zone */ 1025351df4b2SJaegeuk Kim if (!init) 1026351df4b2SJaegeuk Kim goto got_it; 1027351df4b2SJaegeuk Kim if (sbi->secs_per_zone == 1) 1028351df4b2SJaegeuk Kim goto got_it; 1029351df4b2SJaegeuk Kim if (zoneno == old_zoneno) 1030351df4b2SJaegeuk Kim goto got_it; 1031351df4b2SJaegeuk Kim if (dir == ALLOC_LEFT) { 1032351df4b2SJaegeuk Kim if (!go_left && zoneno + 1 >= total_zones) 1033351df4b2SJaegeuk Kim goto got_it; 1034351df4b2SJaegeuk Kim if (go_left && zoneno == 0) 1035351df4b2SJaegeuk Kim goto got_it; 1036351df4b2SJaegeuk Kim } 1037351df4b2SJaegeuk Kim for (i = 0; i < NR_CURSEG_TYPE; i++) 1038351df4b2SJaegeuk Kim if (CURSEG_I(sbi, i)->zone == zoneno) 1039351df4b2SJaegeuk Kim break; 1040351df4b2SJaegeuk Kim 1041351df4b2SJaegeuk Kim if (i < NR_CURSEG_TYPE) { 1042351df4b2SJaegeuk Kim /* zone is in user, try another */
1043351df4b2SJaegeuk Kim if (go_left) 1044351df4b2SJaegeuk Kim hint = zoneno * sbi->secs_per_zone - 1; 1045351df4b2SJaegeuk Kim else if (zoneno + 1 >= total_zones) 1046351df4b2SJaegeuk Kim hint = 0; 1047351df4b2SJaegeuk Kim else 1048351df4b2SJaegeuk Kim hint = (zoneno + 1) * sbi->secs_per_zone; 1049351df4b2SJaegeuk Kim init = false; 1050351df4b2SJaegeuk Kim goto find_other_zone; 1051351df4b2SJaegeuk Kim } 1052351df4b2SJaegeuk Kim got_it: 1053351df4b2SJaegeuk Kim /* set it as dirty segment in free segmap */ 10549850cf4aSJaegeuk Kim f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap)); 1055351df4b2SJaegeuk Kim __set_inuse(sbi, segno); 1056351df4b2SJaegeuk Kim *newseg = segno; 10571a118ccfSChao Yu spin_unlock(&free_i->segmap_lock); 1058351df4b2SJaegeuk Kim } 1059351df4b2SJaegeuk Kim 1060351df4b2SJaegeuk Kim /* (review) install next_segno as the active segment: reset next_blkoff, reinit the summary footer (DATA/NODE type) and record the type in SIT */ static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified) 1061351df4b2SJaegeuk Kim { 1062351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 1063351df4b2SJaegeuk Kim struct summary_footer *sum_footer; 1064351df4b2SJaegeuk Kim 1065351df4b2SJaegeuk Kim curseg->segno = curseg->next_segno; 1066351df4b2SJaegeuk Kim curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno); 1067351df4b2SJaegeuk Kim curseg->next_blkoff = 0; 1068351df4b2SJaegeuk Kim curseg->next_segno = NULL_SEGNO; 1069351df4b2SJaegeuk Kim 1070351df4b2SJaegeuk Kim sum_footer = &(curseg->sum_blk->footer); 1071351df4b2SJaegeuk Kim memset(sum_footer, 0, sizeof(struct summary_footer)); 1072351df4b2SJaegeuk Kim if (IS_DATASEG(type)) 1073351df4b2SJaegeuk Kim SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA); 1074351df4b2SJaegeuk Kim if (IS_NODESEG(type)) 1075351df4b2SJaegeuk Kim SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE); 1076351df4b2SJaegeuk Kim __set_sit_entry_type(sbi, type, curseg->segno, modified); 1077351df4b2SJaegeuk Kim } 1078351df4b2SJaegeuk Kim 10790a8165d7SJaegeuk Kim /* 1080351df4b2SJaegeuk Kim * Allocate a current working segment.
1081351df4b2SJaegeuk Kim * This function always allocates a free segment in LFS manner. 1082351df4b2SJaegeuk Kim */ 1083351df4b2SJaegeuk Kim static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec) 1084351df4b2SJaegeuk Kim { 1085351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 1086351df4b2SJaegeuk Kim unsigned int segno = curseg->segno; 1087351df4b2SJaegeuk Kim int dir = ALLOC_LEFT; 1088351df4b2SJaegeuk Kim 1089351df4b2SJaegeuk Kim write_sum_page(sbi, curseg->sum_blk, 109081fb5e87SHaicheng Li GET_SUM_BLOCK(sbi, segno)); 1091351df4b2SJaegeuk Kim if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA) 1092351df4b2SJaegeuk Kim dir = ALLOC_RIGHT; 1093351df4b2SJaegeuk Kim 1094351df4b2SJaegeuk Kim if (test_opt(sbi, NOHEAP)) 1095351df4b2SJaegeuk Kim dir = ALLOC_RIGHT; 1096351df4b2SJaegeuk Kim 1097351df4b2SJaegeuk Kim get_new_segment(sbi, &segno, new_sec, dir); 1098351df4b2SJaegeuk Kim curseg->next_segno = segno; 1099351df4b2SJaegeuk Kim reset_curseg(sbi, type, 1); 1100351df4b2SJaegeuk Kim curseg->alloc_type = LFS; 1101351df4b2SJaegeuk Kim } 1102351df4b2SJaegeuk Kim 1103351df4b2SJaegeuk Kim /* (review) compute the next SSR-usable offset: first zero bit in (ckpt_valid_map | cur_valid_map) at or after start */ static void __next_free_blkoff(struct f2fs_sb_info *sbi, 1104351df4b2SJaegeuk Kim struct curseg_info *seg, block_t start) 1105351df4b2SJaegeuk Kim { 1106351df4b2SJaegeuk Kim struct seg_entry *se = get_seg_entry(sbi, seg->segno); 1107e81c93cfSChangman Lee int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); 110860a3b782SJaegeuk Kim unsigned long *target_map = SIT_I(sbi)->tmp_map; 1109e81c93cfSChangman Lee unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; 1110e81c93cfSChangman Lee unsigned long *cur_map = (unsigned long *)se->cur_valid_map; 1111e81c93cfSChangman Lee int i, pos; 1112e81c93cfSChangman Lee 1113e81c93cfSChangman Lee for (i = 0; i < entries; i++) 1114e81c93cfSChangman Lee target_map[i] = ckpt_map[i] | cur_map[i]; 1115e81c93cfSChangman Lee 1116e81c93cfSChangman Lee pos = __find_rev_next_zero_bit(target_map,
sbi->blocks_per_seg, start); 1117e81c93cfSChangman Lee 1118e81c93cfSChangman Lee seg->next_blkoff = pos; 1119351df4b2SJaegeuk Kim } 1120351df4b2SJaegeuk Kim 11210a8165d7SJaegeuk Kim /* 1122351df4b2SJaegeuk Kim * If a segment is written by LFS manner, next block offset is just obtained 1123351df4b2SJaegeuk Kim * by increasing the current block offset. However, if a segment is written by 1124351df4b2SJaegeuk Kim * SSR manner, next block offset obtained by calling __next_free_blkoff 1125351df4b2SJaegeuk Kim */ 1126351df4b2SJaegeuk Kim static void __refresh_next_blkoff(struct f2fs_sb_info *sbi, 1127351df4b2SJaegeuk Kim struct curseg_info *seg) 1128351df4b2SJaegeuk Kim { 1129351df4b2SJaegeuk Kim if (seg->alloc_type == SSR) 1130351df4b2SJaegeuk Kim __next_free_blkoff(sbi, seg, seg->next_blkoff + 1); 1131351df4b2SJaegeuk Kim else 1132351df4b2SJaegeuk Kim seg->next_blkoff++; 1133351df4b2SJaegeuk Kim } 1134351df4b2SJaegeuk Kim 11350a8165d7SJaegeuk Kim /* 1136351df4b2SJaegeuk Kim * This function always allocates a used segment(from dirty seglist) by SSR 1137351df4b2SJaegeuk Kim * manner, so it should recover the existing segment information of valid blocks 1138351df4b2SJaegeuk Kim */ 1139351df4b2SJaegeuk Kim static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse) 1140351df4b2SJaegeuk Kim { 1141351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 1142351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 1143351df4b2SJaegeuk Kim unsigned int new_segno = curseg->next_segno; 1144351df4b2SJaegeuk Kim struct f2fs_summary_block *sum_node; 1145351df4b2SJaegeuk Kim struct page *sum_page; 1146351df4b2SJaegeuk Kim 1147351df4b2SJaegeuk Kim write_sum_page(sbi, curseg->sum_blk, 1148351df4b2SJaegeuk Kim GET_SUM_BLOCK(sbi, curseg->segno)); 1149351df4b2SJaegeuk Kim __set_test_and_inuse(sbi, new_segno); 1150351df4b2SJaegeuk Kim 1151351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock); 1152351df4b2SJaegeuk Kim __remove_dirty_segment(sbi,
new_segno, PRE); 1153351df4b2SJaegeuk Kim __remove_dirty_segment(sbi, new_segno, DIRTY); 1154351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock); 1155351df4b2SJaegeuk Kim 1156351df4b2SJaegeuk Kim reset_curseg(sbi, type, 1); 1157351df4b2SJaegeuk Kim curseg->alloc_type = SSR; 1158351df4b2SJaegeuk Kim __next_free_blkoff(sbi, curseg, 0); 1159351df4b2SJaegeuk Kim 1160351df4b2SJaegeuk Kim if (reuse) { 1161351df4b2SJaegeuk Kim sum_page = get_sum_page(sbi, new_segno); 1162351df4b2SJaegeuk Kim sum_node = (struct f2fs_summary_block *)page_address(sum_page); 1163351df4b2SJaegeuk Kim memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE); 1164351df4b2SJaegeuk Kim f2fs_put_page(sum_page, 1); 1165351df4b2SJaegeuk Kim } 1166351df4b2SJaegeuk Kim } 1167351df4b2SJaegeuk Kim 116843727527SJaegeuk Kim /* (review) pick an SSR victim into curseg->next_segno; data logs under space pressure also retry the lower-numbered (hotter) data logs; returns nonzero on success */ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type) 116943727527SJaegeuk Kim { 117043727527SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 117143727527SJaegeuk Kim const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops; 117243727527SJaegeuk Kim 117343727527SJaegeuk Kim if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0)) 117443727527SJaegeuk Kim return v_ops->get_victim(sbi, 117543727527SJaegeuk Kim &(curseg)->next_segno, BG_GC, type, SSR); 117643727527SJaegeuk Kim 117743727527SJaegeuk Kim /* For data segments, let's do SSR more intensively */ 117843727527SJaegeuk Kim for (; type >= CURSEG_HOT_DATA; type--) 117943727527SJaegeuk Kim if (v_ops->get_victim(sbi, &(curseg)->next_segno, 118043727527SJaegeuk Kim BG_GC, type, SSR)) 118143727527SJaegeuk Kim return 1; 118243727527SJaegeuk Kim return 0; 118343727527SJaegeuk Kim } 118443727527SJaegeuk Kim 1185351df4b2SJaegeuk Kim /* 1186351df4b2SJaegeuk Kim * flush out current segment and replace it with new segment 1187351df4b2SJaegeuk Kim * This function should be returned with success, otherwise BUG 1188351df4b2SJaegeuk Kim */ 1189351df4b2SJaegeuk Kim static void allocate_segment_by_default(struct f2fs_sb_info
*sbi, 1190351df4b2SJaegeuk Kim int type, bool force) 1191351df4b2SJaegeuk Kim { 1192351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 1193351df4b2SJaegeuk Kim 11947b405275SGu Zheng if (force) 1195351df4b2SJaegeuk Kim new_curseg(sbi, type, true); 11967b405275SGu Zheng else if (type == CURSEG_WARM_NODE) 1197351df4b2SJaegeuk Kim new_curseg(sbi, type, false); 119860374688SJaegeuk Kim else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type)) 119960374688SJaegeuk Kim new_curseg(sbi, type, false); 1200351df4b2SJaegeuk Kim else if (need_SSR(sbi) && get_ssr_segment(sbi, type)) 1201351df4b2SJaegeuk Kim change_curseg(sbi, type, true); 1202351df4b2SJaegeuk Kim else 1203351df4b2SJaegeuk Kim new_curseg(sbi, type, false); 1204dcdfff65SJaegeuk Kim 1205dcdfff65SJaegeuk Kim stat_inc_seg_type(sbi, curseg); 1206351df4b2SJaegeuk Kim } 1207351df4b2SJaegeuk Kim 120838aa0889SJaegeuk Kim /* (review) force a fresh segment for one log and re-bucket the old one in the dirty seglist */ static void __allocate_new_segments(struct f2fs_sb_info *sbi, int type) 120938aa0889SJaegeuk Kim { 121038aa0889SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 121138aa0889SJaegeuk Kim unsigned int old_segno; 121238aa0889SJaegeuk Kim 121338aa0889SJaegeuk Kim old_segno = curseg->segno; 121438aa0889SJaegeuk Kim SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true); 121538aa0889SJaegeuk Kim locate_dirty_segment(sbi, old_segno); 121638aa0889SJaegeuk Kim } 121738aa0889SJaegeuk Kim 1218351df4b2SJaegeuk Kim /* (review) refresh all three data logs (HOT/WARM/COLD) */ void allocate_new_segments(struct f2fs_sb_info *sbi) 1219351df4b2SJaegeuk Kim { 1220351df4b2SJaegeuk Kim int i; 1221351df4b2SJaegeuk Kim 122238aa0889SJaegeuk Kim for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) 122338aa0889SJaegeuk Kim __allocate_new_segments(sbi, i); 1224351df4b2SJaegeuk Kim } 1225351df4b2SJaegeuk Kim 1226351df4b2SJaegeuk Kim static const struct segment_allocation default_salloc_ops = { 1227351df4b2SJaegeuk Kim .allocate_segment = allocate_segment_by_default, 1228351df4b2SJaegeuk Kim }; 1229351df4b2SJaegeuk Kim 12304b2fecc8SJaegeuk Kim /* (review) FITRIM entry point: walk [start,end] in batched trim windows, issuing discards via write_checkpoint(CP_DISCARD); reports trimmed bytes back in range->len */ int
f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) 12314b2fecc8SJaegeuk Kim { 1232f7ef9b83SJaegeuk Kim __u64 start = F2FS_BYTES_TO_BLK(range->start); 1233f7ef9b83SJaegeuk Kim __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1; 12344b2fecc8SJaegeuk Kim unsigned int start_segno, end_segno; 12354b2fecc8SJaegeuk Kim struct cp_control cpc; 1236c34f42e2SChao Yu int err = 0; 12374b2fecc8SJaegeuk Kim 1238836b5a63SJaegeuk Kim if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize) 12394b2fecc8SJaegeuk Kim return -EINVAL; 12404b2fecc8SJaegeuk Kim 12419bd27ae4SJan Kara cpc.trimmed = 0; 12427cd8558bSJaegeuk Kim if (end <= MAIN_BLKADDR(sbi)) 12434b2fecc8SJaegeuk Kim goto out; 12444b2fecc8SJaegeuk Kim 12454b2fecc8SJaegeuk Kim /* start/end segment number in main_area */ 12467cd8558bSJaegeuk Kim start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start); 12477cd8558bSJaegeuk Kim end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 : 12487cd8558bSJaegeuk Kim GET_SEGNO(sbi, end); 12494b2fecc8SJaegeuk Kim cpc.reason = CP_DISCARD; 1250836b5a63SJaegeuk Kim cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen)); 12514b2fecc8SJaegeuk Kim 12524b2fecc8SJaegeuk Kim /* do checkpoint to issue discard commands safely */ 1253bba681cbSJaegeuk Kim for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) { 1254bba681cbSJaegeuk Kim cpc.trim_start = start_segno; 1255a66cdd98SJaegeuk Kim 1256a66cdd98SJaegeuk Kim if (sbi->discard_blks == 0) 1257a66cdd98SJaegeuk Kim break; 1258a66cdd98SJaegeuk Kim else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi)) 1259a66cdd98SJaegeuk Kim cpc.trim_end = end_segno; 1260a66cdd98SJaegeuk Kim else 1261a66cdd98SJaegeuk Kim cpc.trim_end = min_t(unsigned int, 1262a66cdd98SJaegeuk Kim rounddown(start_segno + 1263bba681cbSJaegeuk Kim BATCHED_TRIM_SEGMENTS(sbi), 1264bba681cbSJaegeuk Kim sbi->segs_per_sec) - 1, end_segno); 1265bba681cbSJaegeuk Kim 1266ca4b02eeSJaegeuk Kim mutex_lock(&sbi->gc_mutex);
1267c34f42e2SChao Yu err = write_checkpoint(sbi, &cpc); 1268ca4b02eeSJaegeuk Kim mutex_unlock(&sbi->gc_mutex); 1269bba681cbSJaegeuk Kim } 12704b2fecc8SJaegeuk Kim out: 1271f7ef9b83SJaegeuk Kim range->len = F2FS_BLK_TO_BYTES(cpc.trimmed); 1272c34f42e2SChao Yu return err; 12734b2fecc8SJaegeuk Kim } 12744b2fecc8SJaegeuk Kim 1275351df4b2SJaegeuk Kim /* (review) any block slot left in the active segment? */ static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type) 1276351df4b2SJaegeuk Kim { 1277351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 1278351df4b2SJaegeuk Kim if (curseg->next_blkoff < sbi->blocks_per_seg) 1279351df4b2SJaegeuk Kim return true; 1280351df4b2SJaegeuk Kim return false; 1281351df4b2SJaegeuk Kim } 1282351df4b2SJaegeuk Kim 1283351df4b2SJaegeuk Kim /* (review) 2-log config: single hot data log + single hot node log */ static int __get_segment_type_2(struct page *page, enum page_type p_type) 1284351df4b2SJaegeuk Kim { 1285351df4b2SJaegeuk Kim if (p_type == DATA) 1286351df4b2SJaegeuk Kim return CURSEG_HOT_DATA; 1287351df4b2SJaegeuk Kim else 1288351df4b2SJaegeuk Kim return CURSEG_HOT_NODE; 1289351df4b2SJaegeuk Kim } 1290351df4b2SJaegeuk Kim 1291351df4b2SJaegeuk Kim /* (review) 4-log config: dir data hot / file data cold; direct cold-node pages warm, other nodes cold */ static int __get_segment_type_4(struct page *page, enum page_type p_type) 1292351df4b2SJaegeuk Kim { 1293351df4b2SJaegeuk Kim if (p_type == DATA) { 1294351df4b2SJaegeuk Kim struct inode *inode = page->mapping->host; 1295351df4b2SJaegeuk Kim 1296351df4b2SJaegeuk Kim if (S_ISDIR(inode->i_mode)) 1297351df4b2SJaegeuk Kim return CURSEG_HOT_DATA; 1298351df4b2SJaegeuk Kim else 1299351df4b2SJaegeuk Kim return CURSEG_COLD_DATA; 1300351df4b2SJaegeuk Kim } else { 1301a344b9fdSJaegeuk Kim if (IS_DNODE(page) && is_cold_node(page)) 1302a344b9fdSJaegeuk Kim return CURSEG_WARM_NODE; 1303351df4b2SJaegeuk Kim else 1304351df4b2SJaegeuk Kim return CURSEG_COLD_NODE; 1305351df4b2SJaegeuk Kim } 1306351df4b2SJaegeuk Kim } 1307351df4b2SJaegeuk Kim 1308351df4b2SJaegeuk Kim /* (review) 6-log config: full hot/warm/cold split for both data and node pages */ static int __get_segment_type_6(struct page *page, enum page_type p_type) 1309351df4b2SJaegeuk Kim { 1310351df4b2SJaegeuk Kim if (p_type == DATA) {
1311351df4b2SJaegeuk Kim struct inode *inode = page->mapping->host; 1312351df4b2SJaegeuk Kim 1313351df4b2SJaegeuk Kim if (S_ISDIR(inode->i_mode)) 1314351df4b2SJaegeuk Kim return CURSEG_HOT_DATA; 1315354a3399SJaegeuk Kim else if (is_cold_data(page) || file_is_cold(inode)) 1316351df4b2SJaegeuk Kim return CURSEG_COLD_DATA; 1317351df4b2SJaegeuk Kim else 1318351df4b2SJaegeuk Kim return CURSEG_WARM_DATA; 1319351df4b2SJaegeuk Kim } else { 1320351df4b2SJaegeuk Kim if (IS_DNODE(page)) 1321351df4b2SJaegeuk Kim return is_cold_node(page) ? CURSEG_WARM_NODE : 1322351df4b2SJaegeuk Kim CURSEG_HOT_NODE; 1323351df4b2SJaegeuk Kim else 1324351df4b2SJaegeuk Kim return CURSEG_COLD_NODE; 1325351df4b2SJaegeuk Kim } 1326351df4b2SJaegeuk Kim } 1327351df4b2SJaegeuk Kim 1328351df4b2SJaegeuk Kim /* (review) dispatch on the mount's active_logs (2/4/6) to pick the target log */ static int __get_segment_type(struct page *page, enum page_type p_type) 1329351df4b2SJaegeuk Kim { 13304081363fSJaegeuk Kim switch (F2FS_P_SB(page)->active_logs) { 1331351df4b2SJaegeuk Kim case 2: 1332351df4b2SJaegeuk Kim return __get_segment_type_2(page, p_type); 1333351df4b2SJaegeuk Kim case 4: 1334351df4b2SJaegeuk Kim return __get_segment_type_4(page, p_type); 1335351df4b2SJaegeuk Kim } 133612a67146SJaegeuk Kim /* NR_CURSEG_TYPE(6) logs by default */ 13379850cf4aSJaegeuk Kim f2fs_bug_on(F2FS_P_SB(page), 13389850cf4aSJaegeuk Kim F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE); 133912a67146SJaegeuk Kim return __get_segment_type_6(page, p_type); 1340351df4b2SJaegeuk Kim } 1341351df4b2SJaegeuk Kim 1342bfad7c2dSJaegeuk Kim /* (review) reserve the next free block of the chosen log (CURSEG_DIRECT_IO maps to WARM_DATA), log its summary and update SIT under curseg_mutex + sentry_lock */ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, 1343351df4b2SJaegeuk Kim block_t old_blkaddr, block_t *new_blkaddr, 1344bfad7c2dSJaegeuk Kim struct f2fs_summary *sum, int type) 1345351df4b2SJaegeuk Kim { 1346351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 1347351df4b2SJaegeuk Kim struct curseg_info *curseg; 134838aa0889SJaegeuk Kim bool direct_io = (type == CURSEG_DIRECT_IO); 134938aa0889SJaegeuk Kim 135038aa0889SJaegeuk Kim type = direct_io ?
CURSEG_WARM_DATA : type; 1351351df4b2SJaegeuk Kim 1352351df4b2SJaegeuk Kim curseg = CURSEG_I(sbi, type); 1353351df4b2SJaegeuk Kim 1354351df4b2SJaegeuk Kim mutex_lock(&curseg->curseg_mutex); 135521cb1d99SJaegeuk Kim mutex_lock(&sit_i->sentry_lock); 1356351df4b2SJaegeuk Kim 135738aa0889SJaegeuk Kim /* direct_io'ed data is aligned to the segment for better performance */ 135847e70ca4SJaegeuk Kim if (direct_io && curseg->next_blkoff && 135947e70ca4SJaegeuk Kim !has_not_enough_free_secs(sbi, 0)) 136038aa0889SJaegeuk Kim __allocate_new_segments(sbi, type); 136138aa0889SJaegeuk Kim 1362351df4b2SJaegeuk Kim *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); 1363351df4b2SJaegeuk Kim 1364351df4b2SJaegeuk Kim /* 1365351df4b2SJaegeuk Kim * __add_sum_entry should be resided under the curseg_mutex 1366351df4b2SJaegeuk Kim * because, this function updates a summary entry in the 1367351df4b2SJaegeuk Kim * current summary block. 1368351df4b2SJaegeuk Kim */ 1369e79efe3bSHaicheng Li __add_sum_entry(sbi, type, sum); 1370351df4b2SJaegeuk Kim 1371351df4b2SJaegeuk Kim __refresh_next_blkoff(sbi, curseg); 1372dcdfff65SJaegeuk Kim 1373dcdfff65SJaegeuk Kim stat_inc_block_count(sbi, curseg); 1374351df4b2SJaegeuk Kim 13755e443818SJaegeuk Kim if (!__has_curseg_space(sbi, type)) 13765e443818SJaegeuk Kim sit_i->s_ops->allocate_segment(sbi, type, false); 1377351df4b2SJaegeuk Kim /* 1378351df4b2SJaegeuk Kim * SIT information should be updated before segment allocation, 1379351df4b2SJaegeuk Kim * since SSR needs latest valid block information.
1380351df4b2SJaegeuk Kim */ 1381351df4b2SJaegeuk Kim refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr); 13825e443818SJaegeuk Kim 1383351df4b2SJaegeuk Kim mutex_unlock(&sit_i->sentry_lock); 1384351df4b2SJaegeuk Kim 1385bfad7c2dSJaegeuk Kim if (page && IS_NODESEG(type)) 1386351df4b2SJaegeuk Kim fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg)); 1387351df4b2SJaegeuk Kim 1388bfad7c2dSJaegeuk Kim mutex_unlock(&curseg->curseg_mutex); 1389bfad7c2dSJaegeuk Kim } 1390bfad7c2dSJaegeuk Kim 139105ca3632SJaegeuk Kim /* (review) allocate the target block for fio and submit the write bio */ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio) 1392bfad7c2dSJaegeuk Kim { 139305ca3632SJaegeuk Kim int type = __get_segment_type(fio->page, fio->type); 1394bfad7c2dSJaegeuk Kim 13957a9d7548SChao Yu allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr, 13967a9d7548SChao Yu &fio->new_blkaddr, sum, type); 1397bfad7c2dSJaegeuk Kim 1398351df4b2SJaegeuk Kim /* writeout dirty page into bdev */ 139905ca3632SJaegeuk Kim f2fs_submit_page_mbio(fio); 1400351df4b2SJaegeuk Kim } 1401351df4b2SJaegeuk Kim 1402577e3495SJaegeuk Kim /* (review) meta pages are written in place: old_blkaddr == new_blkaddr == page->index; REQ_META is dropped for out-of-main addresses */ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page) 1403351df4b2SJaegeuk Kim { 1404458e6197SJaegeuk Kim struct f2fs_io_info fio = { 140505ca3632SJaegeuk Kim .sbi = sbi, 1406458e6197SJaegeuk Kim .type = META, 1407*04d328deSMike Christie .op = REQ_OP_WRITE, 1408*04d328deSMike Christie .op_flags = WRITE_SYNC | REQ_META | REQ_PRIO, 14097a9d7548SChao Yu .old_blkaddr = page->index, 14107a9d7548SChao Yu .new_blkaddr = page->index, 141105ca3632SJaegeuk Kim .page = page, 14124375a336SJaegeuk Kim .encrypted_page = NULL, 1413458e6197SJaegeuk Kim }; 1414458e6197SJaegeuk Kim 14152b947003SChao Yu if (unlikely(page->index >= MAIN_BLKADDR(sbi))) 1416*04d328deSMike Christie fio.op_flags &= ~REQ_META; 14172b947003SChao Yu 1418351df4b2SJaegeuk Kim set_page_writeback(page); 141905ca3632SJaegeuk Kim f2fs_submit_page_mbio(&fio); 1420351df4b2SJaegeuk Kim } 1421351df4b2SJaegeuk Kim 142205ca3632SJaegeuk Kim /* (review) node page write: summary carries only the nid (ofs/version zero) */ void
write_node_page(unsigned int nid, struct f2fs_io_info *fio) 1423351df4b2SJaegeuk Kim { 1424351df4b2SJaegeuk Kim struct f2fs_summary sum; 142505ca3632SJaegeuk Kim 1426351df4b2SJaegeuk Kim set_summary(&sum, nid, 0, 0); 142705ca3632SJaegeuk Kim do_write_page(&sum, fio); 1428351df4b2SJaegeuk Kim } 1429351df4b2SJaegeuk Kim 143005ca3632SJaegeuk Kim /* (review) data page write: summary records owning nid/offset/version, then the dnode's blkaddr is updated to the new location */ void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio) 1431351df4b2SJaegeuk Kim { 143205ca3632SJaegeuk Kim struct f2fs_sb_info *sbi = fio->sbi; 1433351df4b2SJaegeuk Kim struct f2fs_summary sum; 1434351df4b2SJaegeuk Kim struct node_info ni; 1435351df4b2SJaegeuk Kim 14369850cf4aSJaegeuk Kim f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR); 1437351df4b2SJaegeuk Kim get_node_info(sbi, dn->nid, &ni); 1438351df4b2SJaegeuk Kim set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); 143905ca3632SJaegeuk Kim do_write_page(&sum, fio); 1440f28b3434SChao Yu f2fs_update_data_blkaddr(dn, fio->new_blkaddr); 1441351df4b2SJaegeuk Kim } 1442351df4b2SJaegeuk Kim 144305ca3632SJaegeuk Kim /* (review) in-place update: reuse old_blkaddr, no new allocation */ void rewrite_data_page(struct f2fs_io_info *fio) 1444351df4b2SJaegeuk Kim { 14457a9d7548SChao Yu fio->new_blkaddr = fio->old_blkaddr; 144605ca3632SJaegeuk Kim stat_inc_inplace_blocks(fio->sbi); 144705ca3632SJaegeuk Kim f2fs_submit_page_mbio(fio); 1448351df4b2SJaegeuk Kim } 1449351df4b2SJaegeuk Kim 14504356e48eSChao Yu void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, 145119f106bcSChao Yu block_t old_blkaddr, block_t new_blkaddr, 145228bc106bSChao Yu bool recover_curseg, bool recover_newaddr 1453351df4b2SJaegeuk Kim { 1454351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 1455351df4b2SJaegeuk Kim struct curseg_info *curseg; 1456351df4b2SJaegeuk Kim unsigned int segno, old_cursegno; 1457351df4b2SJaegeuk Kim struct seg_entry *se; 1458351df4b2SJaegeuk Kim int type; 145919f106bcSChao Yu unsigned short old_blkoff; 1460351df4b2SJaegeuk Kim 1461351df4b2SJaegeuk Kim segno = GET_SEGNO(sbi, new_blkaddr);
1462351df4b2SJaegeuk Kim se = get_seg_entry(sbi, segno); 1463351df4b2SJaegeuk Kim type = se->type; 1464351df4b2SJaegeuk Kim 146519f106bcSChao Yu if (!recover_curseg) { 146619f106bcSChao Yu /* for recovery flow */ 1467351df4b2SJaegeuk Kim if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) { 1468351df4b2SJaegeuk Kim if (old_blkaddr == NULL_ADDR) 1469351df4b2SJaegeuk Kim type = CURSEG_COLD_DATA; 1470351df4b2SJaegeuk Kim else 1471351df4b2SJaegeuk Kim type = CURSEG_WARM_DATA; 1472351df4b2SJaegeuk Kim } 147319f106bcSChao Yu } else { 147419f106bcSChao Yu if (!IS_CURSEG(sbi, segno)) 147519f106bcSChao Yu type = CURSEG_WARM_DATA; 147619f106bcSChao Yu } 147719f106bcSChao Yu 1478351df4b2SJaegeuk Kim curseg = CURSEG_I(sbi, type); 1479351df4b2SJaegeuk Kim 1480351df4b2SJaegeuk Kim mutex_lock(&curseg->curseg_mutex); 1481351df4b2SJaegeuk Kim mutex_lock(&sit_i->sentry_lock); 1482351df4b2SJaegeuk Kim 1483351df4b2SJaegeuk Kim old_cursegno = curseg->segno; 148419f106bcSChao Yu old_blkoff = curseg->next_blkoff; 1485351df4b2SJaegeuk Kim 1486351df4b2SJaegeuk Kim /* change the current segment */ 1487351df4b2SJaegeuk Kim if (segno != curseg->segno) { 1488351df4b2SJaegeuk Kim curseg->next_segno = segno; 1489351df4b2SJaegeuk Kim change_curseg(sbi, type, true); 1490351df4b2SJaegeuk Kim } 1491351df4b2SJaegeuk Kim 1492491c0854SJaegeuk Kim curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr); 1493e79efe3bSHaicheng Li __add_sum_entry(sbi, type, sum); 1494351df4b2SJaegeuk Kim 149528bc106bSChao Yu if (!recover_curseg || recover_newaddr) 14966e2c64adSJaegeuk Kim update_sit_entry(sbi, new_blkaddr, 1); 14976e2c64adSJaegeuk Kim if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) 14986e2c64adSJaegeuk Kim update_sit_entry(sbi, old_blkaddr, -1); 14996e2c64adSJaegeuk Kim 15006e2c64adSJaegeuk Kim locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); 15016e2c64adSJaegeuk Kim locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr)); 15026e2c64adSJaegeuk Kim 1503351df4b2SJaegeuk Kim 
locate_dirty_segment(sbi, old_cursegno); 1504351df4b2SJaegeuk Kim 150519f106bcSChao Yu if (recover_curseg) { 150619f106bcSChao Yu if (old_cursegno != curseg->segno) { 150719f106bcSChao Yu curseg->next_segno = old_cursegno; 150819f106bcSChao Yu change_curseg(sbi, type, true); 150919f106bcSChao Yu } 151019f106bcSChao Yu curseg->next_blkoff = old_blkoff; 151119f106bcSChao Yu } 151219f106bcSChao Yu 1513351df4b2SJaegeuk Kim mutex_unlock(&sit_i->sentry_lock); 1514351df4b2SJaegeuk Kim mutex_unlock(&curseg->curseg_mutex); 1515351df4b2SJaegeuk Kim } 1516351df4b2SJaegeuk Kim 1517528e3459SChao Yu void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, 1518528e3459SChao Yu block_t old_addr, block_t new_addr, 151928bc106bSChao Yu unsigned char version, bool recover_curseg, 152028bc106bSChao Yu bool recover_newaddr) 1521528e3459SChao Yu { 1522528e3459SChao Yu struct f2fs_summary sum; 1523528e3459SChao Yu 1524528e3459SChao Yu set_summary(&sum, dn->nid, dn->ofs_in_node, version); 1525528e3459SChao Yu 152628bc106bSChao Yu __f2fs_replace_block(sbi, &sum, old_addr, new_addr, 152728bc106bSChao Yu recover_curseg, recover_newaddr); 1528528e3459SChao Yu 1529f28b3434SChao Yu f2fs_update_data_blkaddr(dn, new_addr); 1530528e3459SChao Yu } 1531528e3459SChao Yu 153293dfe2acSJaegeuk Kim void f2fs_wait_on_page_writeback(struct page *page, 1533fec1d657SJaegeuk Kim enum page_type type, bool ordered) 153493dfe2acSJaegeuk Kim { 153593dfe2acSJaegeuk Kim if (PageWriteback(page)) { 15364081363fSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_P_SB(page); 15374081363fSJaegeuk Kim 15380c3a5797SChao Yu f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, type, WRITE); 1539fec1d657SJaegeuk Kim if (ordered) 154093dfe2acSJaegeuk Kim wait_on_page_writeback(page); 1541fec1d657SJaegeuk Kim else 1542fec1d657SJaegeuk Kim wait_for_stable_page(page); 154393dfe2acSJaegeuk Kim } 154493dfe2acSJaegeuk Kim } 154593dfe2acSJaegeuk Kim 154608b39fbdSChao Yu void f2fs_wait_on_encrypted_page_writeback(struct 
f2fs_sb_info *sbi, 154708b39fbdSChao Yu block_t blkaddr) 154808b39fbdSChao Yu { 154908b39fbdSChao Yu struct page *cpage; 155008b39fbdSChao Yu 155108b39fbdSChao Yu if (blkaddr == NEW_ADDR) 155208b39fbdSChao Yu return; 155308b39fbdSChao Yu 155408b39fbdSChao Yu f2fs_bug_on(sbi, blkaddr == NULL_ADDR); 155508b39fbdSChao Yu 155608b39fbdSChao Yu cpage = find_lock_page(META_MAPPING(sbi), blkaddr); 155708b39fbdSChao Yu if (cpage) { 1558fec1d657SJaegeuk Kim f2fs_wait_on_page_writeback(cpage, DATA, true); 155908b39fbdSChao Yu f2fs_put_page(cpage, 1); 156008b39fbdSChao Yu } 156108b39fbdSChao Yu } 156208b39fbdSChao Yu 1563351df4b2SJaegeuk Kim static int read_compacted_summaries(struct f2fs_sb_info *sbi) 1564351df4b2SJaegeuk Kim { 1565351df4b2SJaegeuk Kim struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 1566351df4b2SJaegeuk Kim struct curseg_info *seg_i; 1567351df4b2SJaegeuk Kim unsigned char *kaddr; 1568351df4b2SJaegeuk Kim struct page *page; 1569351df4b2SJaegeuk Kim block_t start; 1570351df4b2SJaegeuk Kim int i, j, offset; 1571351df4b2SJaegeuk Kim 1572351df4b2SJaegeuk Kim start = start_sum_block(sbi); 1573351df4b2SJaegeuk Kim 1574351df4b2SJaegeuk Kim page = get_meta_page(sbi, start++); 1575351df4b2SJaegeuk Kim kaddr = (unsigned char *)page_address(page); 1576351df4b2SJaegeuk Kim 1577351df4b2SJaegeuk Kim /* Step 1: restore nat cache */ 1578351df4b2SJaegeuk Kim seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); 1579b7ad7512SChao Yu memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE); 1580351df4b2SJaegeuk Kim 1581351df4b2SJaegeuk Kim /* Step 2: restore sit cache */ 1582351df4b2SJaegeuk Kim seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); 1583b7ad7512SChao Yu memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE); 1584351df4b2SJaegeuk Kim offset = 2 * SUM_JOURNAL_SIZE; 1585351df4b2SJaegeuk Kim 1586351df4b2SJaegeuk Kim /* Step 3: restore summary entries */ 1587351df4b2SJaegeuk Kim for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 1588351df4b2SJaegeuk Kim unsigned short blk_off; 
1589351df4b2SJaegeuk Kim unsigned int segno; 1590351df4b2SJaegeuk Kim 1591351df4b2SJaegeuk Kim seg_i = CURSEG_I(sbi, i); 1592351df4b2SJaegeuk Kim segno = le32_to_cpu(ckpt->cur_data_segno[i]); 1593351df4b2SJaegeuk Kim blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]); 1594351df4b2SJaegeuk Kim seg_i->next_segno = segno; 1595351df4b2SJaegeuk Kim reset_curseg(sbi, i, 0); 1596351df4b2SJaegeuk Kim seg_i->alloc_type = ckpt->alloc_type[i]; 1597351df4b2SJaegeuk Kim seg_i->next_blkoff = blk_off; 1598351df4b2SJaegeuk Kim 1599351df4b2SJaegeuk Kim if (seg_i->alloc_type == SSR) 1600351df4b2SJaegeuk Kim blk_off = sbi->blocks_per_seg; 1601351df4b2SJaegeuk Kim 1602351df4b2SJaegeuk Kim for (j = 0; j < blk_off; j++) { 1603351df4b2SJaegeuk Kim struct f2fs_summary *s; 1604351df4b2SJaegeuk Kim s = (struct f2fs_summary *)(kaddr + offset); 1605351df4b2SJaegeuk Kim seg_i->sum_blk->entries[j] = *s; 1606351df4b2SJaegeuk Kim offset += SUMMARY_SIZE; 160709cbfeafSKirill A. Shutemov if (offset + SUMMARY_SIZE <= PAGE_SIZE - 1608351df4b2SJaegeuk Kim SUM_FOOTER_SIZE) 1609351df4b2SJaegeuk Kim continue; 1610351df4b2SJaegeuk Kim 1611351df4b2SJaegeuk Kim f2fs_put_page(page, 1); 1612351df4b2SJaegeuk Kim page = NULL; 1613351df4b2SJaegeuk Kim 1614351df4b2SJaegeuk Kim page = get_meta_page(sbi, start++); 1615351df4b2SJaegeuk Kim kaddr = (unsigned char *)page_address(page); 1616351df4b2SJaegeuk Kim offset = 0; 1617351df4b2SJaegeuk Kim } 1618351df4b2SJaegeuk Kim } 1619351df4b2SJaegeuk Kim f2fs_put_page(page, 1); 1620351df4b2SJaegeuk Kim return 0; 1621351df4b2SJaegeuk Kim } 1622351df4b2SJaegeuk Kim 1623351df4b2SJaegeuk Kim static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) 1624351df4b2SJaegeuk Kim { 1625351df4b2SJaegeuk Kim struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 1626351df4b2SJaegeuk Kim struct f2fs_summary_block *sum; 1627351df4b2SJaegeuk Kim struct curseg_info *curseg; 1628351df4b2SJaegeuk Kim struct page *new; 1629351df4b2SJaegeuk Kim unsigned short blk_off; 1630351df4b2SJaegeuk Kim 
unsigned int segno = 0; 1631351df4b2SJaegeuk Kim block_t blk_addr = 0; 1632351df4b2SJaegeuk Kim 1633351df4b2SJaegeuk Kim /* get segment number and block addr */ 1634351df4b2SJaegeuk Kim if (IS_DATASEG(type)) { 1635351df4b2SJaegeuk Kim segno = le32_to_cpu(ckpt->cur_data_segno[type]); 1636351df4b2SJaegeuk Kim blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type - 1637351df4b2SJaegeuk Kim CURSEG_HOT_DATA]); 1638119ee914SJaegeuk Kim if (__exist_node_summaries(sbi)) 1639351df4b2SJaegeuk Kim blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type); 1640351df4b2SJaegeuk Kim else 1641351df4b2SJaegeuk Kim blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type); 1642351df4b2SJaegeuk Kim } else { 1643351df4b2SJaegeuk Kim segno = le32_to_cpu(ckpt->cur_node_segno[type - 1644351df4b2SJaegeuk Kim CURSEG_HOT_NODE]); 1645351df4b2SJaegeuk Kim blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type - 1646351df4b2SJaegeuk Kim CURSEG_HOT_NODE]); 1647119ee914SJaegeuk Kim if (__exist_node_summaries(sbi)) 1648351df4b2SJaegeuk Kim blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE, 1649351df4b2SJaegeuk Kim type - CURSEG_HOT_NODE); 1650351df4b2SJaegeuk Kim else 1651351df4b2SJaegeuk Kim blk_addr = GET_SUM_BLOCK(sbi, segno); 1652351df4b2SJaegeuk Kim } 1653351df4b2SJaegeuk Kim 1654351df4b2SJaegeuk Kim new = get_meta_page(sbi, blk_addr); 1655351df4b2SJaegeuk Kim sum = (struct f2fs_summary_block *)page_address(new); 1656351df4b2SJaegeuk Kim 1657351df4b2SJaegeuk Kim if (IS_NODESEG(type)) { 1658119ee914SJaegeuk Kim if (__exist_node_summaries(sbi)) { 1659351df4b2SJaegeuk Kim struct f2fs_summary *ns = &sum->entries[0]; 1660351df4b2SJaegeuk Kim int i; 1661351df4b2SJaegeuk Kim for (i = 0; i < sbi->blocks_per_seg; i++, ns++) { 1662351df4b2SJaegeuk Kim ns->version = 0; 1663351df4b2SJaegeuk Kim ns->ofs_in_node = 0; 1664351df4b2SJaegeuk Kim } 1665351df4b2SJaegeuk Kim } else { 1666d653788aSGu Zheng int err; 1667d653788aSGu Zheng 1668d653788aSGu Zheng err = restore_node_summary(sbi, segno, sum); 1669d653788aSGu Zheng if (err) 
{ 1670351df4b2SJaegeuk Kim f2fs_put_page(new, 1); 1671d653788aSGu Zheng return err; 1672351df4b2SJaegeuk Kim } 1673351df4b2SJaegeuk Kim } 1674351df4b2SJaegeuk Kim } 1675351df4b2SJaegeuk Kim 1676351df4b2SJaegeuk Kim /* set uncompleted segment to curseg */ 1677351df4b2SJaegeuk Kim curseg = CURSEG_I(sbi, type); 1678351df4b2SJaegeuk Kim mutex_lock(&curseg->curseg_mutex); 1679b7ad7512SChao Yu 1680b7ad7512SChao Yu /* update journal info */ 1681b7ad7512SChao Yu down_write(&curseg->journal_rwsem); 1682b7ad7512SChao Yu memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE); 1683b7ad7512SChao Yu up_write(&curseg->journal_rwsem); 1684b7ad7512SChao Yu 1685b7ad7512SChao Yu memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE); 1686b7ad7512SChao Yu memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE); 1687351df4b2SJaegeuk Kim curseg->next_segno = segno; 1688351df4b2SJaegeuk Kim reset_curseg(sbi, type, 0); 1689351df4b2SJaegeuk Kim curseg->alloc_type = ckpt->alloc_type[type]; 1690351df4b2SJaegeuk Kim curseg->next_blkoff = blk_off; 1691351df4b2SJaegeuk Kim mutex_unlock(&curseg->curseg_mutex); 1692351df4b2SJaegeuk Kim f2fs_put_page(new, 1); 1693351df4b2SJaegeuk Kim return 0; 1694351df4b2SJaegeuk Kim } 1695351df4b2SJaegeuk Kim 1696351df4b2SJaegeuk Kim static int restore_curseg_summaries(struct f2fs_sb_info *sbi) 1697351df4b2SJaegeuk Kim { 1698351df4b2SJaegeuk Kim int type = CURSEG_HOT_DATA; 1699e4fc5fbfSChao Yu int err; 1700351df4b2SJaegeuk Kim 170125ca923bSJaegeuk Kim if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) { 17023fa06d7bSChao Yu int npages = npages_for_summary_flush(sbi, true); 17033fa06d7bSChao Yu 17043fa06d7bSChao Yu if (npages >= 2) 17053fa06d7bSChao Yu ra_meta_pages(sbi, start_sum_block(sbi), npages, 170626879fb1SChao Yu META_CP, true); 17073fa06d7bSChao Yu 1708351df4b2SJaegeuk Kim /* restore for compacted data summary */ 1709351df4b2SJaegeuk Kim if (read_compacted_summaries(sbi)) 1710351df4b2SJaegeuk Kim return -EINVAL; 
1711351df4b2SJaegeuk Kim type = CURSEG_HOT_NODE; 1712351df4b2SJaegeuk Kim } 1713351df4b2SJaegeuk Kim 1714119ee914SJaegeuk Kim if (__exist_node_summaries(sbi)) 17153fa06d7bSChao Yu ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type), 171626879fb1SChao Yu NR_CURSEG_TYPE - type, META_CP, true); 17173fa06d7bSChao Yu 1718e4fc5fbfSChao Yu for (; type <= CURSEG_COLD_NODE; type++) { 1719e4fc5fbfSChao Yu err = read_normal_summaries(sbi, type); 1720e4fc5fbfSChao Yu if (err) 1721e4fc5fbfSChao Yu return err; 1722e4fc5fbfSChao Yu } 1723e4fc5fbfSChao Yu 1724351df4b2SJaegeuk Kim return 0; 1725351df4b2SJaegeuk Kim } 1726351df4b2SJaegeuk Kim 1727351df4b2SJaegeuk Kim static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) 1728351df4b2SJaegeuk Kim { 1729351df4b2SJaegeuk Kim struct page *page; 1730351df4b2SJaegeuk Kim unsigned char *kaddr; 1731351df4b2SJaegeuk Kim struct f2fs_summary *summary; 1732351df4b2SJaegeuk Kim struct curseg_info *seg_i; 1733351df4b2SJaegeuk Kim int written_size = 0; 1734351df4b2SJaegeuk Kim int i, j; 1735351df4b2SJaegeuk Kim 1736351df4b2SJaegeuk Kim page = grab_meta_page(sbi, blkaddr++); 1737351df4b2SJaegeuk Kim kaddr = (unsigned char *)page_address(page); 1738351df4b2SJaegeuk Kim 1739351df4b2SJaegeuk Kim /* Step 1: write nat cache */ 1740351df4b2SJaegeuk Kim seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); 1741b7ad7512SChao Yu memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE); 1742351df4b2SJaegeuk Kim written_size += SUM_JOURNAL_SIZE; 1743351df4b2SJaegeuk Kim 1744351df4b2SJaegeuk Kim /* Step 2: write sit cache */ 1745351df4b2SJaegeuk Kim seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); 1746b7ad7512SChao Yu memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE); 1747351df4b2SJaegeuk Kim written_size += SUM_JOURNAL_SIZE; 1748351df4b2SJaegeuk Kim 1749351df4b2SJaegeuk Kim /* Step 3: write summary entries */ 1750351df4b2SJaegeuk Kim for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 1751351df4b2SJaegeuk Kim unsigned short blkoff; 
1752351df4b2SJaegeuk Kim seg_i = CURSEG_I(sbi, i); 1753351df4b2SJaegeuk Kim if (sbi->ckpt->alloc_type[i] == SSR) 1754351df4b2SJaegeuk Kim blkoff = sbi->blocks_per_seg; 1755351df4b2SJaegeuk Kim else 1756351df4b2SJaegeuk Kim blkoff = curseg_blkoff(sbi, i); 1757351df4b2SJaegeuk Kim 1758351df4b2SJaegeuk Kim for (j = 0; j < blkoff; j++) { 1759351df4b2SJaegeuk Kim if (!page) { 1760351df4b2SJaegeuk Kim page = grab_meta_page(sbi, blkaddr++); 1761351df4b2SJaegeuk Kim kaddr = (unsigned char *)page_address(page); 1762351df4b2SJaegeuk Kim written_size = 0; 1763351df4b2SJaegeuk Kim } 1764351df4b2SJaegeuk Kim summary = (struct f2fs_summary *)(kaddr + written_size); 1765351df4b2SJaegeuk Kim *summary = seg_i->sum_blk->entries[j]; 1766351df4b2SJaegeuk Kim written_size += SUMMARY_SIZE; 1767351df4b2SJaegeuk Kim 176809cbfeafSKirill A. Shutemov if (written_size + SUMMARY_SIZE <= PAGE_SIZE - 1769351df4b2SJaegeuk Kim SUM_FOOTER_SIZE) 1770351df4b2SJaegeuk Kim continue; 1771351df4b2SJaegeuk Kim 1772e8d61a74SChao Yu set_page_dirty(page); 1773351df4b2SJaegeuk Kim f2fs_put_page(page, 1); 1774351df4b2SJaegeuk Kim page = NULL; 1775351df4b2SJaegeuk Kim } 1776351df4b2SJaegeuk Kim } 1777e8d61a74SChao Yu if (page) { 1778e8d61a74SChao Yu set_page_dirty(page); 1779351df4b2SJaegeuk Kim f2fs_put_page(page, 1); 1780351df4b2SJaegeuk Kim } 1781e8d61a74SChao Yu } 1782351df4b2SJaegeuk Kim 1783351df4b2SJaegeuk Kim static void write_normal_summaries(struct f2fs_sb_info *sbi, 1784351df4b2SJaegeuk Kim block_t blkaddr, int type) 1785351df4b2SJaegeuk Kim { 1786351df4b2SJaegeuk Kim int i, end; 1787351df4b2SJaegeuk Kim if (IS_DATASEG(type)) 1788351df4b2SJaegeuk Kim end = type + NR_CURSEG_DATA_TYPE; 1789351df4b2SJaegeuk Kim else 1790351df4b2SJaegeuk Kim end = type + NR_CURSEG_NODE_TYPE; 1791351df4b2SJaegeuk Kim 1792b7ad7512SChao Yu for (i = type; i < end; i++) 1793b7ad7512SChao Yu write_current_sum_page(sbi, i, blkaddr + (i - type)); 1794351df4b2SJaegeuk Kim } 1795351df4b2SJaegeuk Kim 1796351df4b2SJaegeuk Kim void 
write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk) 1797351df4b2SJaegeuk Kim { 179825ca923bSJaegeuk Kim if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) 1799351df4b2SJaegeuk Kim write_compacted_summaries(sbi, start_blk); 1800351df4b2SJaegeuk Kim else 1801351df4b2SJaegeuk Kim write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA); 1802351df4b2SJaegeuk Kim } 1803351df4b2SJaegeuk Kim 1804351df4b2SJaegeuk Kim void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk) 1805351df4b2SJaegeuk Kim { 1806351df4b2SJaegeuk Kim write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE); 1807351df4b2SJaegeuk Kim } 1808351df4b2SJaegeuk Kim 1809dfc08a12SChao Yu int lookup_journal_in_cursum(struct f2fs_journal *journal, int type, 1810351df4b2SJaegeuk Kim unsigned int val, int alloc) 1811351df4b2SJaegeuk Kim { 1812351df4b2SJaegeuk Kim int i; 1813351df4b2SJaegeuk Kim 1814351df4b2SJaegeuk Kim if (type == NAT_JOURNAL) { 1815dfc08a12SChao Yu for (i = 0; i < nats_in_cursum(journal); i++) { 1816dfc08a12SChao Yu if (le32_to_cpu(nid_in_journal(journal, i)) == val) 1817351df4b2SJaegeuk Kim return i; 1818351df4b2SJaegeuk Kim } 1819dfc08a12SChao Yu if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL)) 1820dfc08a12SChao Yu return update_nats_in_cursum(journal, 1); 1821351df4b2SJaegeuk Kim } else if (type == SIT_JOURNAL) { 1822dfc08a12SChao Yu for (i = 0; i < sits_in_cursum(journal); i++) 1823dfc08a12SChao Yu if (le32_to_cpu(segno_in_journal(journal, i)) == val) 1824351df4b2SJaegeuk Kim return i; 1825dfc08a12SChao Yu if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL)) 1826dfc08a12SChao Yu return update_sits_in_cursum(journal, 1); 1827351df4b2SJaegeuk Kim } 1828351df4b2SJaegeuk Kim return -1; 1829351df4b2SJaegeuk Kim } 1830351df4b2SJaegeuk Kim 1831351df4b2SJaegeuk Kim static struct page *get_current_sit_page(struct f2fs_sb_info *sbi, 1832351df4b2SJaegeuk Kim unsigned int segno) 1833351df4b2SJaegeuk Kim { 18342cc22186SGu Zheng return get_meta_page(sbi, 
current_sit_addr(sbi, segno)); 1835351df4b2SJaegeuk Kim } 1836351df4b2SJaegeuk Kim 1837351df4b2SJaegeuk Kim static struct page *get_next_sit_page(struct f2fs_sb_info *sbi, 1838351df4b2SJaegeuk Kim unsigned int start) 1839351df4b2SJaegeuk Kim { 1840351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 1841351df4b2SJaegeuk Kim struct page *src_page, *dst_page; 1842351df4b2SJaegeuk Kim pgoff_t src_off, dst_off; 1843351df4b2SJaegeuk Kim void *src_addr, *dst_addr; 1844351df4b2SJaegeuk Kim 1845351df4b2SJaegeuk Kim src_off = current_sit_addr(sbi, start); 1846351df4b2SJaegeuk Kim dst_off = next_sit_addr(sbi, src_off); 1847351df4b2SJaegeuk Kim 1848351df4b2SJaegeuk Kim /* get current sit block page without lock */ 1849351df4b2SJaegeuk Kim src_page = get_meta_page(sbi, src_off); 1850351df4b2SJaegeuk Kim dst_page = grab_meta_page(sbi, dst_off); 18519850cf4aSJaegeuk Kim f2fs_bug_on(sbi, PageDirty(src_page)); 1852351df4b2SJaegeuk Kim 1853351df4b2SJaegeuk Kim src_addr = page_address(src_page); 1854351df4b2SJaegeuk Kim dst_addr = page_address(dst_page); 185509cbfeafSKirill A. 
Shutemov memcpy(dst_addr, src_addr, PAGE_SIZE); 1856351df4b2SJaegeuk Kim 1857351df4b2SJaegeuk Kim set_page_dirty(dst_page); 1858351df4b2SJaegeuk Kim f2fs_put_page(src_page, 1); 1859351df4b2SJaegeuk Kim 1860351df4b2SJaegeuk Kim set_to_next_sit(sit_i, start); 1861351df4b2SJaegeuk Kim 1862351df4b2SJaegeuk Kim return dst_page; 1863351df4b2SJaegeuk Kim } 1864351df4b2SJaegeuk Kim 1865184a5cd2SChao Yu static struct sit_entry_set *grab_sit_entry_set(void) 1866184a5cd2SChao Yu { 1867184a5cd2SChao Yu struct sit_entry_set *ses = 186880c54505SJaegeuk Kim f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS); 1869184a5cd2SChao Yu 1870184a5cd2SChao Yu ses->entry_cnt = 0; 1871184a5cd2SChao Yu INIT_LIST_HEAD(&ses->set_list); 1872184a5cd2SChao Yu return ses; 1873184a5cd2SChao Yu } 1874184a5cd2SChao Yu 1875184a5cd2SChao Yu static void release_sit_entry_set(struct sit_entry_set *ses) 1876184a5cd2SChao Yu { 1877184a5cd2SChao Yu list_del(&ses->set_list); 1878184a5cd2SChao Yu kmem_cache_free(sit_entry_set_slab, ses); 1879184a5cd2SChao Yu } 1880184a5cd2SChao Yu 1881184a5cd2SChao Yu static void adjust_sit_entry_set(struct sit_entry_set *ses, 1882184a5cd2SChao Yu struct list_head *head) 1883184a5cd2SChao Yu { 1884184a5cd2SChao Yu struct sit_entry_set *next = ses; 1885184a5cd2SChao Yu 1886184a5cd2SChao Yu if (list_is_last(&ses->set_list, head)) 1887184a5cd2SChao Yu return; 1888184a5cd2SChao Yu 1889184a5cd2SChao Yu list_for_each_entry_continue(next, head, set_list) 1890184a5cd2SChao Yu if (ses->entry_cnt <= next->entry_cnt) 1891184a5cd2SChao Yu break; 1892184a5cd2SChao Yu 1893184a5cd2SChao Yu list_move_tail(&ses->set_list, &next->set_list); 1894184a5cd2SChao Yu } 1895184a5cd2SChao Yu 1896184a5cd2SChao Yu static void add_sit_entry(unsigned int segno, struct list_head *head) 1897184a5cd2SChao Yu { 1898184a5cd2SChao Yu struct sit_entry_set *ses; 1899184a5cd2SChao Yu unsigned int start_segno = START_SEGNO(segno); 1900184a5cd2SChao Yu 1901184a5cd2SChao Yu list_for_each_entry(ses, head, set_list) { 
1902184a5cd2SChao Yu if (ses->start_segno == start_segno) { 1903184a5cd2SChao Yu ses->entry_cnt++; 1904184a5cd2SChao Yu adjust_sit_entry_set(ses, head); 1905184a5cd2SChao Yu return; 1906184a5cd2SChao Yu } 1907184a5cd2SChao Yu } 1908184a5cd2SChao Yu 1909184a5cd2SChao Yu ses = grab_sit_entry_set(); 1910184a5cd2SChao Yu 1911184a5cd2SChao Yu ses->start_segno = start_segno; 1912184a5cd2SChao Yu ses->entry_cnt++; 1913184a5cd2SChao Yu list_add(&ses->set_list, head); 1914184a5cd2SChao Yu } 1915184a5cd2SChao Yu 1916184a5cd2SChao Yu static void add_sits_in_set(struct f2fs_sb_info *sbi) 1917184a5cd2SChao Yu { 1918184a5cd2SChao Yu struct f2fs_sm_info *sm_info = SM_I(sbi); 1919184a5cd2SChao Yu struct list_head *set_list = &sm_info->sit_entry_set; 1920184a5cd2SChao Yu unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap; 1921184a5cd2SChao Yu unsigned int segno; 1922184a5cd2SChao Yu 19237cd8558bSJaegeuk Kim for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi)) 1924184a5cd2SChao Yu add_sit_entry(segno, set_list); 1925184a5cd2SChao Yu } 1926184a5cd2SChao Yu 1927184a5cd2SChao Yu static void remove_sits_in_journal(struct f2fs_sb_info *sbi) 1928351df4b2SJaegeuk Kim { 1929351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 1930b7ad7512SChao Yu struct f2fs_journal *journal = curseg->journal; 1931351df4b2SJaegeuk Kim int i; 1932351df4b2SJaegeuk Kim 1933b7ad7512SChao Yu down_write(&curseg->journal_rwsem); 1934dfc08a12SChao Yu for (i = 0; i < sits_in_cursum(journal); i++) { 1935351df4b2SJaegeuk Kim unsigned int segno; 1936184a5cd2SChao Yu bool dirtied; 1937184a5cd2SChao Yu 1938dfc08a12SChao Yu segno = le32_to_cpu(segno_in_journal(journal, i)); 1939184a5cd2SChao Yu dirtied = __mark_sit_entry_dirty(sbi, segno); 1940184a5cd2SChao Yu 1941184a5cd2SChao Yu if (!dirtied) 1942184a5cd2SChao Yu add_sit_entry(segno, &SM_I(sbi)->sit_entry_set); 1943351df4b2SJaegeuk Kim } 1944dfc08a12SChao Yu update_sits_in_cursum(journal, -i); 1945b7ad7512SChao Yu 
up_write(&curseg->journal_rwsem); 1946351df4b2SJaegeuk Kim } 1947351df4b2SJaegeuk Kim 19480a8165d7SJaegeuk Kim /* 1949351df4b2SJaegeuk Kim * CP calls this function, which flushes SIT entries including sit_journal, 1950351df4b2SJaegeuk Kim * and moves prefree segs to free segs. 1951351df4b2SJaegeuk Kim */ 19524b2fecc8SJaegeuk Kim void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) 1953351df4b2SJaegeuk Kim { 1954351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 1955351df4b2SJaegeuk Kim unsigned long *bitmap = sit_i->dirty_sentries_bitmap; 1956351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 1957b7ad7512SChao Yu struct f2fs_journal *journal = curseg->journal; 1958184a5cd2SChao Yu struct sit_entry_set *ses, *tmp; 1959184a5cd2SChao Yu struct list_head *head = &SM_I(sbi)->sit_entry_set; 1960184a5cd2SChao Yu bool to_journal = true; 19614b2fecc8SJaegeuk Kim struct seg_entry *se; 1962351df4b2SJaegeuk Kim 1963351df4b2SJaegeuk Kim mutex_lock(&sit_i->sentry_lock); 1964351df4b2SJaegeuk Kim 19652b11a74bSWanpeng Li if (!sit_i->dirty_sentries) 19662b11a74bSWanpeng Li goto out; 19672b11a74bSWanpeng Li 1968351df4b2SJaegeuk Kim /* 1969184a5cd2SChao Yu * add and account sit entries of dirty bitmap in sit entry 1970184a5cd2SChao Yu * set temporarily 1971351df4b2SJaegeuk Kim */ 1972184a5cd2SChao Yu add_sits_in_set(sbi); 1973351df4b2SJaegeuk Kim 1974184a5cd2SChao Yu /* 1975184a5cd2SChao Yu * if there are no enough space in journal to store dirty sit 1976184a5cd2SChao Yu * entries, remove all entries from journal and add and account 1977184a5cd2SChao Yu * them in sit entry set. 
1978184a5cd2SChao Yu */ 1979dfc08a12SChao Yu if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL)) 1980184a5cd2SChao Yu remove_sits_in_journal(sbi); 1981184a5cd2SChao Yu 1982184a5cd2SChao Yu /* 1983184a5cd2SChao Yu * there are two steps to flush sit entries: 1984184a5cd2SChao Yu * #1, flush sit entries to journal in current cold data summary block. 1985184a5cd2SChao Yu * #2, flush sit entries to sit page. 1986184a5cd2SChao Yu */ 1987184a5cd2SChao Yu list_for_each_entry_safe(ses, tmp, head, set_list) { 19884a257ed6SJaegeuk Kim struct page *page = NULL; 1989184a5cd2SChao Yu struct f2fs_sit_block *raw_sit = NULL; 1990184a5cd2SChao Yu unsigned int start_segno = ses->start_segno; 1991184a5cd2SChao Yu unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK, 19927cd8558bSJaegeuk Kim (unsigned long)MAIN_SEGS(sbi)); 1993184a5cd2SChao Yu unsigned int segno = start_segno; 1994184a5cd2SChao Yu 1995184a5cd2SChao Yu if (to_journal && 1996dfc08a12SChao Yu !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL)) 1997184a5cd2SChao Yu to_journal = false; 1998184a5cd2SChao Yu 1999b7ad7512SChao Yu if (to_journal) { 2000b7ad7512SChao Yu down_write(&curseg->journal_rwsem); 2001b7ad7512SChao Yu } else { 2002184a5cd2SChao Yu page = get_next_sit_page(sbi, start_segno); 2003184a5cd2SChao Yu raw_sit = page_address(page); 2004184a5cd2SChao Yu } 2005184a5cd2SChao Yu 2006184a5cd2SChao Yu /* flush dirty sit entries in region of current sit set */ 2007184a5cd2SChao Yu for_each_set_bit_from(segno, bitmap, end) { 2008184a5cd2SChao Yu int offset, sit_offset; 20094b2fecc8SJaegeuk Kim 20104b2fecc8SJaegeuk Kim se = get_seg_entry(sbi, segno); 2011351df4b2SJaegeuk Kim 2012b2955550SJaegeuk Kim /* add discard candidates */ 2013d7bc2484SJaegeuk Kim if (cpc->reason != CP_DISCARD) { 20144b2fecc8SJaegeuk Kim cpc->trim_start = segno; 20154b2fecc8SJaegeuk Kim add_discard_addrs(sbi, cpc); 20164b2fecc8SJaegeuk Kim } 2017b2955550SJaegeuk Kim 2018184a5cd2SChao Yu if (to_journal) { 
2019dfc08a12SChao Yu offset = lookup_journal_in_cursum(journal, 2020184a5cd2SChao Yu SIT_JOURNAL, segno, 1); 2021184a5cd2SChao Yu f2fs_bug_on(sbi, offset < 0); 2022dfc08a12SChao Yu segno_in_journal(journal, offset) = 2023184a5cd2SChao Yu cpu_to_le32(segno); 2024184a5cd2SChao Yu seg_info_to_raw_sit(se, 2025dfc08a12SChao Yu &sit_in_journal(journal, offset)); 2026184a5cd2SChao Yu } else { 2027184a5cd2SChao Yu sit_offset = SIT_ENTRY_OFFSET(sit_i, segno); 2028184a5cd2SChao Yu seg_info_to_raw_sit(se, 2029184a5cd2SChao Yu &raw_sit->entries[sit_offset]); 2030351df4b2SJaegeuk Kim } 2031351df4b2SJaegeuk Kim 2032351df4b2SJaegeuk Kim __clear_bit(segno, bitmap); 2033351df4b2SJaegeuk Kim sit_i->dirty_sentries--; 2034184a5cd2SChao Yu ses->entry_cnt--; 2035351df4b2SJaegeuk Kim } 2036184a5cd2SChao Yu 2037b7ad7512SChao Yu if (to_journal) 2038b7ad7512SChao Yu up_write(&curseg->journal_rwsem); 2039b7ad7512SChao Yu else 2040184a5cd2SChao Yu f2fs_put_page(page, 1); 2041184a5cd2SChao Yu 2042184a5cd2SChao Yu f2fs_bug_on(sbi, ses->entry_cnt); 2043184a5cd2SChao Yu release_sit_entry_set(ses); 2044184a5cd2SChao Yu } 2045184a5cd2SChao Yu 2046184a5cd2SChao Yu f2fs_bug_on(sbi, !list_empty(head)); 2047184a5cd2SChao Yu f2fs_bug_on(sbi, sit_i->dirty_sentries); 2048184a5cd2SChao Yu out: 20494b2fecc8SJaegeuk Kim if (cpc->reason == CP_DISCARD) { 20504b2fecc8SJaegeuk Kim for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) 20514b2fecc8SJaegeuk Kim add_discard_addrs(sbi, cpc); 20524b2fecc8SJaegeuk Kim } 2053351df4b2SJaegeuk Kim mutex_unlock(&sit_i->sentry_lock); 2054351df4b2SJaegeuk Kim 2055351df4b2SJaegeuk Kim set_prefree_as_free_segments(sbi); 2056351df4b2SJaegeuk Kim } 2057351df4b2SJaegeuk Kim 2058351df4b2SJaegeuk Kim static int build_sit_info(struct f2fs_sb_info *sbi) 2059351df4b2SJaegeuk Kim { 2060351df4b2SJaegeuk Kim struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 2061351df4b2SJaegeuk Kim struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2062351df4b2SJaegeuk Kim struct sit_info 
*sit_i; 2063351df4b2SJaegeuk Kim unsigned int sit_segs, start; 2064351df4b2SJaegeuk Kim char *src_bitmap, *dst_bitmap; 2065351df4b2SJaegeuk Kim unsigned int bitmap_size; 2066351df4b2SJaegeuk Kim 2067351df4b2SJaegeuk Kim /* allocate memory for SIT information */ 2068351df4b2SJaegeuk Kim sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL); 2069351df4b2SJaegeuk Kim if (!sit_i) 2070351df4b2SJaegeuk Kim return -ENOMEM; 2071351df4b2SJaegeuk Kim 2072351df4b2SJaegeuk Kim SM_I(sbi)->sit_info = sit_i; 2073351df4b2SJaegeuk Kim 207439307a8eSJaegeuk Kim sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) * 207539307a8eSJaegeuk Kim sizeof(struct seg_entry), GFP_KERNEL); 2076351df4b2SJaegeuk Kim if (!sit_i->sentries) 2077351df4b2SJaegeuk Kim return -ENOMEM; 2078351df4b2SJaegeuk Kim 20797cd8558bSJaegeuk Kim bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); 208039307a8eSJaegeuk Kim sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL); 2081351df4b2SJaegeuk Kim if (!sit_i->dirty_sentries_bitmap) 2082351df4b2SJaegeuk Kim return -ENOMEM; 2083351df4b2SJaegeuk Kim 20847cd8558bSJaegeuk Kim for (start = 0; start < MAIN_SEGS(sbi); start++) { 2085351df4b2SJaegeuk Kim sit_i->sentries[start].cur_valid_map 2086351df4b2SJaegeuk Kim = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); 2087351df4b2SJaegeuk Kim sit_i->sentries[start].ckpt_valid_map 2088351df4b2SJaegeuk Kim = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); 2089a66cdd98SJaegeuk Kim sit_i->sentries[start].discard_map 2090a66cdd98SJaegeuk Kim = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); 2091a66cdd98SJaegeuk Kim if (!sit_i->sentries[start].cur_valid_map || 2092a66cdd98SJaegeuk Kim !sit_i->sentries[start].ckpt_valid_map || 2093a66cdd98SJaegeuk Kim !sit_i->sentries[start].discard_map) 2094351df4b2SJaegeuk Kim return -ENOMEM; 2095351df4b2SJaegeuk Kim } 2096351df4b2SJaegeuk Kim 209760a3b782SJaegeuk Kim sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); 209860a3b782SJaegeuk Kim if (!sit_i->tmp_map) 209960a3b782SJaegeuk Kim return -ENOMEM; 
210060a3b782SJaegeuk Kim 2101351df4b2SJaegeuk Kim if (sbi->segs_per_sec > 1) { 210239307a8eSJaegeuk Kim sit_i->sec_entries = f2fs_kvzalloc(MAIN_SECS(sbi) * 210339307a8eSJaegeuk Kim sizeof(struct sec_entry), GFP_KERNEL); 2104351df4b2SJaegeuk Kim if (!sit_i->sec_entries) 2105351df4b2SJaegeuk Kim return -ENOMEM; 2106351df4b2SJaegeuk Kim } 2107351df4b2SJaegeuk Kim 2108351df4b2SJaegeuk Kim /* get information related with SIT */ 2109351df4b2SJaegeuk Kim sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1; 2110351df4b2SJaegeuk Kim 2111351df4b2SJaegeuk Kim /* setup SIT bitmap from ckeckpoint pack */ 2112351df4b2SJaegeuk Kim bitmap_size = __bitmap_size(sbi, SIT_BITMAP); 2113351df4b2SJaegeuk Kim src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP); 2114351df4b2SJaegeuk Kim 211579b5793bSAlexandru Gheorghiu dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL); 2116351df4b2SJaegeuk Kim if (!dst_bitmap) 2117351df4b2SJaegeuk Kim return -ENOMEM; 2118351df4b2SJaegeuk Kim 2119351df4b2SJaegeuk Kim /* init SIT information */ 2120351df4b2SJaegeuk Kim sit_i->s_ops = &default_salloc_ops; 2121351df4b2SJaegeuk Kim 2122351df4b2SJaegeuk Kim sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr); 2123351df4b2SJaegeuk Kim sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg; 2124351df4b2SJaegeuk Kim sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count); 2125351df4b2SJaegeuk Kim sit_i->sit_bitmap = dst_bitmap; 2126351df4b2SJaegeuk Kim sit_i->bitmap_size = bitmap_size; 2127351df4b2SJaegeuk Kim sit_i->dirty_sentries = 0; 2128351df4b2SJaegeuk Kim sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK; 2129351df4b2SJaegeuk Kim sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time); 2130351df4b2SJaegeuk Kim sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec; 2131351df4b2SJaegeuk Kim mutex_init(&sit_i->sentry_lock); 2132351df4b2SJaegeuk Kim return 0; 2133351df4b2SJaegeuk Kim } 2134351df4b2SJaegeuk Kim 2135351df4b2SJaegeuk Kim static int build_free_segmap(struct f2fs_sb_info *sbi) 
2136351df4b2SJaegeuk Kim { 2137351df4b2SJaegeuk Kim struct free_segmap_info *free_i; 2138351df4b2SJaegeuk Kim unsigned int bitmap_size, sec_bitmap_size; 2139351df4b2SJaegeuk Kim 2140351df4b2SJaegeuk Kim /* allocate memory for free segmap information */ 2141351df4b2SJaegeuk Kim free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL); 2142351df4b2SJaegeuk Kim if (!free_i) 2143351df4b2SJaegeuk Kim return -ENOMEM; 2144351df4b2SJaegeuk Kim 2145351df4b2SJaegeuk Kim SM_I(sbi)->free_info = free_i; 2146351df4b2SJaegeuk Kim 21477cd8558bSJaegeuk Kim bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); 214839307a8eSJaegeuk Kim free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL); 2149351df4b2SJaegeuk Kim if (!free_i->free_segmap) 2150351df4b2SJaegeuk Kim return -ENOMEM; 2151351df4b2SJaegeuk Kim 21527cd8558bSJaegeuk Kim sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); 215339307a8eSJaegeuk Kim free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL); 2154351df4b2SJaegeuk Kim if (!free_i->free_secmap) 2155351df4b2SJaegeuk Kim return -ENOMEM; 2156351df4b2SJaegeuk Kim 2157351df4b2SJaegeuk Kim /* set all segments as dirty temporarily */ 2158351df4b2SJaegeuk Kim memset(free_i->free_segmap, 0xff, bitmap_size); 2159351df4b2SJaegeuk Kim memset(free_i->free_secmap, 0xff, sec_bitmap_size); 2160351df4b2SJaegeuk Kim 2161351df4b2SJaegeuk Kim /* init free segmap information */ 21627cd8558bSJaegeuk Kim free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi)); 2163351df4b2SJaegeuk Kim free_i->free_segments = 0; 2164351df4b2SJaegeuk Kim free_i->free_sections = 0; 21651a118ccfSChao Yu spin_lock_init(&free_i->segmap_lock); 2166351df4b2SJaegeuk Kim return 0; 2167351df4b2SJaegeuk Kim } 2168351df4b2SJaegeuk Kim 2169351df4b2SJaegeuk Kim static int build_curseg(struct f2fs_sb_info *sbi) 2170351df4b2SJaegeuk Kim { 21711042d60fSNamjae Jeon struct curseg_info *array; 2172351df4b2SJaegeuk Kim int i; 2173351df4b2SJaegeuk Kim 2174b434babfSFabian Frederick array = 
kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL); 2175351df4b2SJaegeuk Kim if (!array) 2176351df4b2SJaegeuk Kim return -ENOMEM; 2177351df4b2SJaegeuk Kim 2178351df4b2SJaegeuk Kim SM_I(sbi)->curseg_array = array; 2179351df4b2SJaegeuk Kim 2180351df4b2SJaegeuk Kim for (i = 0; i < NR_CURSEG_TYPE; i++) { 2181351df4b2SJaegeuk Kim mutex_init(&array[i].curseg_mutex); 218209cbfeafSKirill A. Shutemov array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL); 2183351df4b2SJaegeuk Kim if (!array[i].sum_blk) 2184351df4b2SJaegeuk Kim return -ENOMEM; 2185b7ad7512SChao Yu init_rwsem(&array[i].journal_rwsem); 2186b7ad7512SChao Yu array[i].journal = kzalloc(sizeof(struct f2fs_journal), 2187b7ad7512SChao Yu GFP_KERNEL); 2188b7ad7512SChao Yu if (!array[i].journal) 2189b7ad7512SChao Yu return -ENOMEM; 2190351df4b2SJaegeuk Kim array[i].segno = NULL_SEGNO; 2191351df4b2SJaegeuk Kim array[i].next_blkoff = 0; 2192351df4b2SJaegeuk Kim } 2193351df4b2SJaegeuk Kim return restore_curseg_summaries(sbi); 2194351df4b2SJaegeuk Kim } 2195351df4b2SJaegeuk Kim 2196351df4b2SJaegeuk Kim static void build_sit_entries(struct f2fs_sb_info *sbi) 2197351df4b2SJaegeuk Kim { 2198351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 2199351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 2200b7ad7512SChao Yu struct f2fs_journal *journal = curseg->journal; 220174de593aSChao Yu int sit_blk_cnt = SIT_BLK_CNT(sbi); 220274de593aSChao Yu unsigned int i, start, end; 220374de593aSChao Yu unsigned int readed, start_blk = 0; 2204e9f5b8b8SChao Yu int nrpages = MAX_BIO_BLOCKS(sbi) * 8; 2205351df4b2SJaegeuk Kim 220674de593aSChao Yu do { 220726879fb1SChao Yu readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT, true); 220874de593aSChao Yu 220974de593aSChao Yu start = start_blk * sit_i->sents_per_block; 221074de593aSChao Yu end = (start_blk + readed) * sit_i->sents_per_block; 221174de593aSChao Yu 22127cd8558bSJaegeuk Kim for (; start < end && start < MAIN_SEGS(sbi); start++) { 
2213351df4b2SJaegeuk Kim struct seg_entry *se = &sit_i->sentries[start]; 2214351df4b2SJaegeuk Kim struct f2fs_sit_block *sit_blk; 2215351df4b2SJaegeuk Kim struct f2fs_sit_entry sit; 2216351df4b2SJaegeuk Kim struct page *page; 2217351df4b2SJaegeuk Kim 2218b7ad7512SChao Yu down_read(&curseg->journal_rwsem); 2219dfc08a12SChao Yu for (i = 0; i < sits_in_cursum(journal); i++) { 2220dfc08a12SChao Yu if (le32_to_cpu(segno_in_journal(journal, i)) 22216c311ec6SChris Fries == start) { 2222dfc08a12SChao Yu sit = sit_in_journal(journal, i); 2223b7ad7512SChao Yu up_read(&curseg->journal_rwsem); 2224351df4b2SJaegeuk Kim goto got_it; 2225351df4b2SJaegeuk Kim } 2226351df4b2SJaegeuk Kim } 2227b7ad7512SChao Yu up_read(&curseg->journal_rwsem); 222874de593aSChao Yu 2229351df4b2SJaegeuk Kim page = get_current_sit_page(sbi, start); 2230351df4b2SJaegeuk Kim sit_blk = (struct f2fs_sit_block *)page_address(page); 2231351df4b2SJaegeuk Kim sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)]; 2232351df4b2SJaegeuk Kim f2fs_put_page(page, 1); 2233351df4b2SJaegeuk Kim got_it: 2234351df4b2SJaegeuk Kim check_block_count(sbi, start, &sit); 2235351df4b2SJaegeuk Kim seg_info_from_raw_sit(se, &sit); 2236a66cdd98SJaegeuk Kim 2237a66cdd98SJaegeuk Kim /* build discard map only one time */ 2238a66cdd98SJaegeuk Kim memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE); 2239a66cdd98SJaegeuk Kim sbi->discard_blks += sbi->blocks_per_seg - se->valid_blocks; 2240a66cdd98SJaegeuk Kim 2241351df4b2SJaegeuk Kim if (sbi->segs_per_sec > 1) { 2242351df4b2SJaegeuk Kim struct sec_entry *e = get_sec_entry(sbi, start); 2243351df4b2SJaegeuk Kim e->valid_blocks += se->valid_blocks; 2244351df4b2SJaegeuk Kim } 2245351df4b2SJaegeuk Kim } 224674de593aSChao Yu start_blk += readed; 224774de593aSChao Yu } while (start_blk < sit_blk_cnt); 2248351df4b2SJaegeuk Kim } 2249351df4b2SJaegeuk Kim 2250351df4b2SJaegeuk Kim static void init_free_segmap(struct f2fs_sb_info *sbi) 2251351df4b2SJaegeuk Kim { 2252351df4b2SJaegeuk Kim 
unsigned int start; 2253351df4b2SJaegeuk Kim int type; 2254351df4b2SJaegeuk Kim 22557cd8558bSJaegeuk Kim for (start = 0; start < MAIN_SEGS(sbi); start++) { 2256351df4b2SJaegeuk Kim struct seg_entry *sentry = get_seg_entry(sbi, start); 2257351df4b2SJaegeuk Kim if (!sentry->valid_blocks) 2258351df4b2SJaegeuk Kim __set_free(sbi, start); 2259351df4b2SJaegeuk Kim } 2260351df4b2SJaegeuk Kim 2261351df4b2SJaegeuk Kim /* set use the current segments */ 2262351df4b2SJaegeuk Kim for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) { 2263351df4b2SJaegeuk Kim struct curseg_info *curseg_t = CURSEG_I(sbi, type); 2264351df4b2SJaegeuk Kim __set_test_and_inuse(sbi, curseg_t->segno); 2265351df4b2SJaegeuk Kim } 2266351df4b2SJaegeuk Kim } 2267351df4b2SJaegeuk Kim 2268351df4b2SJaegeuk Kim static void init_dirty_segmap(struct f2fs_sb_info *sbi) 2269351df4b2SJaegeuk Kim { 2270351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 2271351df4b2SJaegeuk Kim struct free_segmap_info *free_i = FREE_I(sbi); 22727cd8558bSJaegeuk Kim unsigned int segno = 0, offset = 0; 2273351df4b2SJaegeuk Kim unsigned short valid_blocks; 2274351df4b2SJaegeuk Kim 22758736fbf0SNamjae Jeon while (1) { 2276351df4b2SJaegeuk Kim /* find dirty segment based on free segmap */ 22777cd8558bSJaegeuk Kim segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset); 22787cd8558bSJaegeuk Kim if (segno >= MAIN_SEGS(sbi)) 2279351df4b2SJaegeuk Kim break; 2280351df4b2SJaegeuk Kim offset = segno + 1; 2281351df4b2SJaegeuk Kim valid_blocks = get_valid_blocks(sbi, segno, 0); 2282ec325b52SJaegeuk Kim if (valid_blocks == sbi->blocks_per_seg || !valid_blocks) 2283351df4b2SJaegeuk Kim continue; 2284ec325b52SJaegeuk Kim if (valid_blocks > sbi->blocks_per_seg) { 2285ec325b52SJaegeuk Kim f2fs_bug_on(sbi, 1); 2286ec325b52SJaegeuk Kim continue; 2287ec325b52SJaegeuk Kim } 2288351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock); 2289351df4b2SJaegeuk Kim __locate_dirty_segment(sbi, segno, DIRTY); 2290351df4b2SJaegeuk Kim 
mutex_unlock(&dirty_i->seglist_lock); 2291351df4b2SJaegeuk Kim } 2292351df4b2SJaegeuk Kim } 2293351df4b2SJaegeuk Kim 22945ec4e49fSJaegeuk Kim static int init_victim_secmap(struct f2fs_sb_info *sbi) 2295351df4b2SJaegeuk Kim { 2296351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 22977cd8558bSJaegeuk Kim unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); 2298351df4b2SJaegeuk Kim 229939307a8eSJaegeuk Kim dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL); 23005ec4e49fSJaegeuk Kim if (!dirty_i->victim_secmap) 2301351df4b2SJaegeuk Kim return -ENOMEM; 2302351df4b2SJaegeuk Kim return 0; 2303351df4b2SJaegeuk Kim } 2304351df4b2SJaegeuk Kim 2305351df4b2SJaegeuk Kim static int build_dirty_segmap(struct f2fs_sb_info *sbi) 2306351df4b2SJaegeuk Kim { 2307351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i; 2308351df4b2SJaegeuk Kim unsigned int bitmap_size, i; 2309351df4b2SJaegeuk Kim 2310351df4b2SJaegeuk Kim /* allocate memory for dirty segments list information */ 2311351df4b2SJaegeuk Kim dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL); 2312351df4b2SJaegeuk Kim if (!dirty_i) 2313351df4b2SJaegeuk Kim return -ENOMEM; 2314351df4b2SJaegeuk Kim 2315351df4b2SJaegeuk Kim SM_I(sbi)->dirty_info = dirty_i; 2316351df4b2SJaegeuk Kim mutex_init(&dirty_i->seglist_lock); 2317351df4b2SJaegeuk Kim 23187cd8558bSJaegeuk Kim bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); 2319351df4b2SJaegeuk Kim 2320351df4b2SJaegeuk Kim for (i = 0; i < NR_DIRTY_TYPE; i++) { 232139307a8eSJaegeuk Kim dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL); 2322351df4b2SJaegeuk Kim if (!dirty_i->dirty_segmap[i]) 2323351df4b2SJaegeuk Kim return -ENOMEM; 2324351df4b2SJaegeuk Kim } 2325351df4b2SJaegeuk Kim 2326351df4b2SJaegeuk Kim init_dirty_segmap(sbi); 23275ec4e49fSJaegeuk Kim return init_victim_secmap(sbi); 2328351df4b2SJaegeuk Kim } 2329351df4b2SJaegeuk Kim 23300a8165d7SJaegeuk Kim /* 2331351df4b2SJaegeuk Kim * Update min, max modified time 
for cost-benefit GC algorithm 2332351df4b2SJaegeuk Kim */ 2333351df4b2SJaegeuk Kim static void init_min_max_mtime(struct f2fs_sb_info *sbi) 2334351df4b2SJaegeuk Kim { 2335351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 2336351df4b2SJaegeuk Kim unsigned int segno; 2337351df4b2SJaegeuk Kim 2338351df4b2SJaegeuk Kim mutex_lock(&sit_i->sentry_lock); 2339351df4b2SJaegeuk Kim 2340351df4b2SJaegeuk Kim sit_i->min_mtime = LLONG_MAX; 2341351df4b2SJaegeuk Kim 23427cd8558bSJaegeuk Kim for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) { 2343351df4b2SJaegeuk Kim unsigned int i; 2344351df4b2SJaegeuk Kim unsigned long long mtime = 0; 2345351df4b2SJaegeuk Kim 2346351df4b2SJaegeuk Kim for (i = 0; i < sbi->segs_per_sec; i++) 2347351df4b2SJaegeuk Kim mtime += get_seg_entry(sbi, segno + i)->mtime; 2348351df4b2SJaegeuk Kim 2349351df4b2SJaegeuk Kim mtime = div_u64(mtime, sbi->segs_per_sec); 2350351df4b2SJaegeuk Kim 2351351df4b2SJaegeuk Kim if (sit_i->min_mtime > mtime) 2352351df4b2SJaegeuk Kim sit_i->min_mtime = mtime; 2353351df4b2SJaegeuk Kim } 2354351df4b2SJaegeuk Kim sit_i->max_mtime = get_mtime(sbi); 2355351df4b2SJaegeuk Kim mutex_unlock(&sit_i->sentry_lock); 2356351df4b2SJaegeuk Kim } 2357351df4b2SJaegeuk Kim 2358351df4b2SJaegeuk Kim int build_segment_manager(struct f2fs_sb_info *sbi) 2359351df4b2SJaegeuk Kim { 2360351df4b2SJaegeuk Kim struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 2361351df4b2SJaegeuk Kim struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 23621042d60fSNamjae Jeon struct f2fs_sm_info *sm_info; 2363351df4b2SJaegeuk Kim int err; 2364351df4b2SJaegeuk Kim 2365351df4b2SJaegeuk Kim sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL); 2366351df4b2SJaegeuk Kim if (!sm_info) 2367351df4b2SJaegeuk Kim return -ENOMEM; 2368351df4b2SJaegeuk Kim 2369351df4b2SJaegeuk Kim /* init sm info */ 2370351df4b2SJaegeuk Kim sbi->sm_info = sm_info; 2371351df4b2SJaegeuk Kim sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); 
2372351df4b2SJaegeuk Kim sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); 2373351df4b2SJaegeuk Kim sm_info->segment_count = le32_to_cpu(raw_super->segment_count); 2374351df4b2SJaegeuk Kim sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); 2375351df4b2SJaegeuk Kim sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); 2376351df4b2SJaegeuk Kim sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main); 2377351df4b2SJaegeuk Kim sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); 237858c41035SJaegeuk Kim sm_info->rec_prefree_segments = sm_info->main_segments * 237958c41035SJaegeuk Kim DEF_RECLAIM_PREFREE_SEGMENTS / 100; 23809b5f136fSJaegeuk Kim sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC; 2381216fbd64SJaegeuk Kim sm_info->min_ipu_util = DEF_MIN_IPU_UTIL; 2382c1ce1b02SJaegeuk Kim sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS; 2383351df4b2SJaegeuk Kim 23847fd9e544SJaegeuk Kim INIT_LIST_HEAD(&sm_info->discard_list); 23857fd9e544SJaegeuk Kim sm_info->nr_discards = 0; 23867fd9e544SJaegeuk Kim sm_info->max_discards = 0; 23877fd9e544SJaegeuk Kim 2388bba681cbSJaegeuk Kim sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS; 2389bba681cbSJaegeuk Kim 2390184a5cd2SChao Yu INIT_LIST_HEAD(&sm_info->sit_entry_set); 2391184a5cd2SChao Yu 2392b270ad6fSGu Zheng if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) { 23932163d198SGu Zheng err = create_flush_cmd_control(sbi); 23942163d198SGu Zheng if (err) 2395a688b9d9SGu Zheng return err; 2396a688b9d9SGu Zheng } 23976b4afdd7SJaegeuk Kim 2398351df4b2SJaegeuk Kim err = build_sit_info(sbi); 2399351df4b2SJaegeuk Kim if (err) 2400351df4b2SJaegeuk Kim return err; 2401351df4b2SJaegeuk Kim err = build_free_segmap(sbi); 2402351df4b2SJaegeuk Kim if (err) 2403351df4b2SJaegeuk Kim return err; 2404351df4b2SJaegeuk Kim err = build_curseg(sbi); 2405351df4b2SJaegeuk Kim if (err) 2406351df4b2SJaegeuk Kim return err; 2407351df4b2SJaegeuk Kim 2408351df4b2SJaegeuk Kim /* reinit free segmap based 
on SIT */ 2409351df4b2SJaegeuk Kim build_sit_entries(sbi); 2410351df4b2SJaegeuk Kim 2411351df4b2SJaegeuk Kim init_free_segmap(sbi); 2412351df4b2SJaegeuk Kim err = build_dirty_segmap(sbi); 2413351df4b2SJaegeuk Kim if (err) 2414351df4b2SJaegeuk Kim return err; 2415351df4b2SJaegeuk Kim 2416351df4b2SJaegeuk Kim init_min_max_mtime(sbi); 2417351df4b2SJaegeuk Kim return 0; 2418351df4b2SJaegeuk Kim } 2419351df4b2SJaegeuk Kim 2420351df4b2SJaegeuk Kim static void discard_dirty_segmap(struct f2fs_sb_info *sbi, 2421351df4b2SJaegeuk Kim enum dirty_type dirty_type) 2422351df4b2SJaegeuk Kim { 2423351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 2424351df4b2SJaegeuk Kim 2425351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock); 242639307a8eSJaegeuk Kim kvfree(dirty_i->dirty_segmap[dirty_type]); 2427351df4b2SJaegeuk Kim dirty_i->nr_dirty[dirty_type] = 0; 2428351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock); 2429351df4b2SJaegeuk Kim } 2430351df4b2SJaegeuk Kim 24315ec4e49fSJaegeuk Kim static void destroy_victim_secmap(struct f2fs_sb_info *sbi) 2432351df4b2SJaegeuk Kim { 2433351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 243439307a8eSJaegeuk Kim kvfree(dirty_i->victim_secmap); 2435351df4b2SJaegeuk Kim } 2436351df4b2SJaegeuk Kim 2437351df4b2SJaegeuk Kim static void destroy_dirty_segmap(struct f2fs_sb_info *sbi) 2438351df4b2SJaegeuk Kim { 2439351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 2440351df4b2SJaegeuk Kim int i; 2441351df4b2SJaegeuk Kim 2442351df4b2SJaegeuk Kim if (!dirty_i) 2443351df4b2SJaegeuk Kim return; 2444351df4b2SJaegeuk Kim 2445351df4b2SJaegeuk Kim /* discard pre-free/dirty segments list */ 2446351df4b2SJaegeuk Kim for (i = 0; i < NR_DIRTY_TYPE; i++) 2447351df4b2SJaegeuk Kim discard_dirty_segmap(sbi, i); 2448351df4b2SJaegeuk Kim 24495ec4e49fSJaegeuk Kim destroy_victim_secmap(sbi); 2450351df4b2SJaegeuk Kim SM_I(sbi)->dirty_info = NULL; 2451351df4b2SJaegeuk Kim kfree(dirty_i); 
2452351df4b2SJaegeuk Kim } 2453351df4b2SJaegeuk Kim 2454351df4b2SJaegeuk Kim static void destroy_curseg(struct f2fs_sb_info *sbi) 2455351df4b2SJaegeuk Kim { 2456351df4b2SJaegeuk Kim struct curseg_info *array = SM_I(sbi)->curseg_array; 2457351df4b2SJaegeuk Kim int i; 2458351df4b2SJaegeuk Kim 2459351df4b2SJaegeuk Kim if (!array) 2460351df4b2SJaegeuk Kim return; 2461351df4b2SJaegeuk Kim SM_I(sbi)->curseg_array = NULL; 2462b7ad7512SChao Yu for (i = 0; i < NR_CURSEG_TYPE; i++) { 2463351df4b2SJaegeuk Kim kfree(array[i].sum_blk); 2464b7ad7512SChao Yu kfree(array[i].journal); 2465b7ad7512SChao Yu } 2466351df4b2SJaegeuk Kim kfree(array); 2467351df4b2SJaegeuk Kim } 2468351df4b2SJaegeuk Kim 2469351df4b2SJaegeuk Kim static void destroy_free_segmap(struct f2fs_sb_info *sbi) 2470351df4b2SJaegeuk Kim { 2471351df4b2SJaegeuk Kim struct free_segmap_info *free_i = SM_I(sbi)->free_info; 2472351df4b2SJaegeuk Kim if (!free_i) 2473351df4b2SJaegeuk Kim return; 2474351df4b2SJaegeuk Kim SM_I(sbi)->free_info = NULL; 247539307a8eSJaegeuk Kim kvfree(free_i->free_segmap); 247639307a8eSJaegeuk Kim kvfree(free_i->free_secmap); 2477351df4b2SJaegeuk Kim kfree(free_i); 2478351df4b2SJaegeuk Kim } 2479351df4b2SJaegeuk Kim 2480351df4b2SJaegeuk Kim static void destroy_sit_info(struct f2fs_sb_info *sbi) 2481351df4b2SJaegeuk Kim { 2482351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 2483351df4b2SJaegeuk Kim unsigned int start; 2484351df4b2SJaegeuk Kim 2485351df4b2SJaegeuk Kim if (!sit_i) 2486351df4b2SJaegeuk Kim return; 2487351df4b2SJaegeuk Kim 2488351df4b2SJaegeuk Kim if (sit_i->sentries) { 24897cd8558bSJaegeuk Kim for (start = 0; start < MAIN_SEGS(sbi); start++) { 2490351df4b2SJaegeuk Kim kfree(sit_i->sentries[start].cur_valid_map); 2491351df4b2SJaegeuk Kim kfree(sit_i->sentries[start].ckpt_valid_map); 2492a66cdd98SJaegeuk Kim kfree(sit_i->sentries[start].discard_map); 2493351df4b2SJaegeuk Kim } 2494351df4b2SJaegeuk Kim } 249560a3b782SJaegeuk Kim kfree(sit_i->tmp_map); 249660a3b782SJaegeuk Kim 
249739307a8eSJaegeuk Kim kvfree(sit_i->sentries); 249839307a8eSJaegeuk Kim kvfree(sit_i->sec_entries); 249939307a8eSJaegeuk Kim kvfree(sit_i->dirty_sentries_bitmap); 2500351df4b2SJaegeuk Kim 2501351df4b2SJaegeuk Kim SM_I(sbi)->sit_info = NULL; 2502351df4b2SJaegeuk Kim kfree(sit_i->sit_bitmap); 2503351df4b2SJaegeuk Kim kfree(sit_i); 2504351df4b2SJaegeuk Kim } 2505351df4b2SJaegeuk Kim 2506351df4b2SJaegeuk Kim void destroy_segment_manager(struct f2fs_sb_info *sbi) 2507351df4b2SJaegeuk Kim { 2508351df4b2SJaegeuk Kim struct f2fs_sm_info *sm_info = SM_I(sbi); 2509a688b9d9SGu Zheng 25103b03f724SChao Yu if (!sm_info) 25113b03f724SChao Yu return; 25122163d198SGu Zheng destroy_flush_cmd_control(sbi); 2513351df4b2SJaegeuk Kim destroy_dirty_segmap(sbi); 2514351df4b2SJaegeuk Kim destroy_curseg(sbi); 2515351df4b2SJaegeuk Kim destroy_free_segmap(sbi); 2516351df4b2SJaegeuk Kim destroy_sit_info(sbi); 2517351df4b2SJaegeuk Kim sbi->sm_info = NULL; 2518351df4b2SJaegeuk Kim kfree(sm_info); 2519351df4b2SJaegeuk Kim } 25207fd9e544SJaegeuk Kim 25217fd9e544SJaegeuk Kim int __init create_segment_manager_caches(void) 25227fd9e544SJaegeuk Kim { 25237fd9e544SJaegeuk Kim discard_entry_slab = f2fs_kmem_cache_create("discard_entry", 2524e8512d2eSGu Zheng sizeof(struct discard_entry)); 25257fd9e544SJaegeuk Kim if (!discard_entry_slab) 2526184a5cd2SChao Yu goto fail; 2527184a5cd2SChao Yu 2528184a5cd2SChao Yu sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set", 2529c9ee0085SChangman Lee sizeof(struct sit_entry_set)); 2530184a5cd2SChao Yu if (!sit_entry_set_slab) 2531184a5cd2SChao Yu goto destory_discard_entry; 253288b88a66SJaegeuk Kim 253388b88a66SJaegeuk Kim inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry", 253488b88a66SJaegeuk Kim sizeof(struct inmem_pages)); 253588b88a66SJaegeuk Kim if (!inmem_entry_slab) 253688b88a66SJaegeuk Kim goto destroy_sit_entry_set; 25377fd9e544SJaegeuk Kim return 0; 2538184a5cd2SChao Yu 253988b88a66SJaegeuk Kim destroy_sit_entry_set: 
254088b88a66SJaegeuk Kim kmem_cache_destroy(sit_entry_set_slab); 2541184a5cd2SChao Yu destory_discard_entry: 2542184a5cd2SChao Yu kmem_cache_destroy(discard_entry_slab); 2543184a5cd2SChao Yu fail: 2544184a5cd2SChao Yu return -ENOMEM; 25457fd9e544SJaegeuk Kim } 25467fd9e544SJaegeuk Kim 25477fd9e544SJaegeuk Kim void destroy_segment_manager_caches(void) 25487fd9e544SJaegeuk Kim { 2549184a5cd2SChao Yu kmem_cache_destroy(sit_entry_set_slab); 25507fd9e544SJaegeuk Kim kmem_cache_destroy(discard_entry_slab); 255188b88a66SJaegeuk Kim kmem_cache_destroy(inmem_entry_slab); 25527fd9e544SJaegeuk Kim } 2553