10a8165d7SJaegeuk Kim /* 2351df4b2SJaegeuk Kim * fs/f2fs/segment.c 3351df4b2SJaegeuk Kim * 4351df4b2SJaegeuk Kim * Copyright (c) 2012 Samsung Electronics Co., Ltd. 5351df4b2SJaegeuk Kim * http://www.samsung.com/ 6351df4b2SJaegeuk Kim * 7351df4b2SJaegeuk Kim * This program is free software; you can redistribute it and/or modify 8351df4b2SJaegeuk Kim * it under the terms of the GNU General Public License version 2 as 9351df4b2SJaegeuk Kim * published by the Free Software Foundation. 10351df4b2SJaegeuk Kim */ 11351df4b2SJaegeuk Kim #include <linux/fs.h> 12351df4b2SJaegeuk Kim #include <linux/f2fs_fs.h> 13351df4b2SJaegeuk Kim #include <linux/bio.h> 14351df4b2SJaegeuk Kim #include <linux/blkdev.h> 15690e4a3eSGeert Uytterhoeven #include <linux/prefetch.h> 166b4afdd7SJaegeuk Kim #include <linux/kthread.h> 1774de593aSChao Yu #include <linux/swap.h> 1860b99b48SJaegeuk Kim #include <linux/timer.h> 19351df4b2SJaegeuk Kim 20351df4b2SJaegeuk Kim #include "f2fs.h" 21351df4b2SJaegeuk Kim #include "segment.h" 22351df4b2SJaegeuk Kim #include "node.h" 239e4ded3fSJaegeuk Kim #include "trace.h" 246ec178daSNamjae Jeon #include <trace/events/f2fs.h> 25351df4b2SJaegeuk Kim 269a7f143aSChangman Lee #define __reverse_ffz(x) __reverse_ffs(~(x)) 279a7f143aSChangman Lee 287fd9e544SJaegeuk Kim static struct kmem_cache *discard_entry_slab; 29b01a9201SJaegeuk Kim static struct kmem_cache *discard_cmd_slab; 30184a5cd2SChao Yu static struct kmem_cache *sit_entry_set_slab; 3188b88a66SJaegeuk Kim static struct kmem_cache *inmem_entry_slab; 327fd9e544SJaegeuk Kim 33f96999c3SJaegeuk Kim static unsigned long __reverse_ulong(unsigned char *str) 34f96999c3SJaegeuk Kim { 35f96999c3SJaegeuk Kim unsigned long tmp = 0; 36f96999c3SJaegeuk Kim int shift = 24, idx = 0; 37f96999c3SJaegeuk Kim 38f96999c3SJaegeuk Kim #if BITS_PER_LONG == 64 39f96999c3SJaegeuk Kim shift = 56; 40f96999c3SJaegeuk Kim #endif 41f96999c3SJaegeuk Kim while (shift >= 0) { 42f96999c3SJaegeuk Kim tmp |= (unsigned long)str[idx++] << shift; 
43f96999c3SJaegeuk Kim shift -= BITS_PER_BYTE; 44f96999c3SJaegeuk Kim } 45f96999c3SJaegeuk Kim return tmp; 46f96999c3SJaegeuk Kim } 47f96999c3SJaegeuk Kim 489a7f143aSChangman Lee /* 499a7f143aSChangman Lee * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since 509a7f143aSChangman Lee * MSB and LSB are reversed in a byte by f2fs_set_bit. 519a7f143aSChangman Lee */ 529a7f143aSChangman Lee static inline unsigned long __reverse_ffs(unsigned long word) 539a7f143aSChangman Lee { 549a7f143aSChangman Lee int num = 0; 559a7f143aSChangman Lee 569a7f143aSChangman Lee #if BITS_PER_LONG == 64 57f96999c3SJaegeuk Kim if ((word & 0xffffffff00000000UL) == 0) 589a7f143aSChangman Lee num += 32; 59f96999c3SJaegeuk Kim else 609a7f143aSChangman Lee word >>= 32; 619a7f143aSChangman Lee #endif 62f96999c3SJaegeuk Kim if ((word & 0xffff0000) == 0) 639a7f143aSChangman Lee num += 16; 64f96999c3SJaegeuk Kim else 659a7f143aSChangman Lee word >>= 16; 66f96999c3SJaegeuk Kim 67f96999c3SJaegeuk Kim if ((word & 0xff00) == 0) 689a7f143aSChangman Lee num += 8; 69f96999c3SJaegeuk Kim else 709a7f143aSChangman Lee word >>= 8; 71f96999c3SJaegeuk Kim 729a7f143aSChangman Lee if ((word & 0xf0) == 0) 739a7f143aSChangman Lee num += 4; 749a7f143aSChangman Lee else 759a7f143aSChangman Lee word >>= 4; 76f96999c3SJaegeuk Kim 779a7f143aSChangman Lee if ((word & 0xc) == 0) 789a7f143aSChangman Lee num += 2; 799a7f143aSChangman Lee else 809a7f143aSChangman Lee word >>= 2; 81f96999c3SJaegeuk Kim 829a7f143aSChangman Lee if ((word & 0x2) == 0) 839a7f143aSChangman Lee num += 1; 849a7f143aSChangman Lee return num; 859a7f143aSChangman Lee } 869a7f143aSChangman Lee 879a7f143aSChangman Lee /* 88e1c42045Sarter97 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because 899a7f143aSChangman Lee * f2fs_set_bit makes MSB and LSB reversed in a byte. 90692223d1SFan Li * @size must be integral times of unsigned long. 
919a7f143aSChangman Lee * Example: 92f96999c3SJaegeuk Kim * MSB <--> LSB 93f96999c3SJaegeuk Kim * f2fs_set_bit(0, bitmap) => 1000 0000 94f96999c3SJaegeuk Kim * f2fs_set_bit(7, bitmap) => 0000 0001 959a7f143aSChangman Lee */ 969a7f143aSChangman Lee static unsigned long __find_rev_next_bit(const unsigned long *addr, 979a7f143aSChangman Lee unsigned long size, unsigned long offset) 989a7f143aSChangman Lee { 999a7f143aSChangman Lee const unsigned long *p = addr + BIT_WORD(offset); 100692223d1SFan Li unsigned long result = size; 1019a7f143aSChangman Lee unsigned long tmp; 1029a7f143aSChangman Lee 1039a7f143aSChangman Lee if (offset >= size) 1049a7f143aSChangman Lee return size; 1059a7f143aSChangman Lee 106692223d1SFan Li size -= (offset & ~(BITS_PER_LONG - 1)); 1079a7f143aSChangman Lee offset %= BITS_PER_LONG; 108692223d1SFan Li 109692223d1SFan Li while (1) { 110692223d1SFan Li if (*p == 0) 111692223d1SFan Li goto pass; 1129a7f143aSChangman Lee 113f96999c3SJaegeuk Kim tmp = __reverse_ulong((unsigned char *)p); 114692223d1SFan Li 115f96999c3SJaegeuk Kim tmp &= ~0UL >> offset; 1169a7f143aSChangman Lee if (size < BITS_PER_LONG) 117692223d1SFan Li tmp &= (~0UL << (BITS_PER_LONG - size)); 1189a7f143aSChangman Lee if (tmp) 119692223d1SFan Li goto found; 120692223d1SFan Li pass: 121692223d1SFan Li if (size <= BITS_PER_LONG) 122692223d1SFan Li break; 1239a7f143aSChangman Lee size -= BITS_PER_LONG; 124692223d1SFan Li offset = 0; 125f96999c3SJaegeuk Kim p++; 1269a7f143aSChangman Lee } 1279a7f143aSChangman Lee return result; 128692223d1SFan Li found: 129692223d1SFan Li return result - size + __reverse_ffs(tmp); 1309a7f143aSChangman Lee } 1319a7f143aSChangman Lee 1329a7f143aSChangman Lee static unsigned long __find_rev_next_zero_bit(const unsigned long *addr, 1339a7f143aSChangman Lee unsigned long size, unsigned long offset) 1349a7f143aSChangman Lee { 1359a7f143aSChangman Lee const unsigned long *p = addr + BIT_WORD(offset); 13680609448SJaegeuk Kim unsigned long result = size; 
1379a7f143aSChangman Lee unsigned long tmp; 1389a7f143aSChangman Lee 1399a7f143aSChangman Lee if (offset >= size) 1409a7f143aSChangman Lee return size; 1419a7f143aSChangman Lee 14280609448SJaegeuk Kim size -= (offset & ~(BITS_PER_LONG - 1)); 1439a7f143aSChangman Lee offset %= BITS_PER_LONG; 14480609448SJaegeuk Kim 14580609448SJaegeuk Kim while (1) { 14680609448SJaegeuk Kim if (*p == ~0UL) 14780609448SJaegeuk Kim goto pass; 1489a7f143aSChangman Lee 149f96999c3SJaegeuk Kim tmp = __reverse_ulong((unsigned char *)p); 150f96999c3SJaegeuk Kim 15180609448SJaegeuk Kim if (offset) 15280609448SJaegeuk Kim tmp |= ~0UL << (BITS_PER_LONG - offset); 1539a7f143aSChangman Lee if (size < BITS_PER_LONG) 15480609448SJaegeuk Kim tmp |= ~0UL >> size; 155f96999c3SJaegeuk Kim if (tmp != ~0UL) 15680609448SJaegeuk Kim goto found; 15780609448SJaegeuk Kim pass: 15880609448SJaegeuk Kim if (size <= BITS_PER_LONG) 15980609448SJaegeuk Kim break; 1609a7f143aSChangman Lee size -= BITS_PER_LONG; 16180609448SJaegeuk Kim offset = 0; 162f96999c3SJaegeuk Kim p++; 1639a7f143aSChangman Lee } 1649a7f143aSChangman Lee return result; 16580609448SJaegeuk Kim found: 16680609448SJaegeuk Kim return result - size + __reverse_ffz(tmp); 1679a7f143aSChangman Lee } 1689a7f143aSChangman Lee 16988b88a66SJaegeuk Kim void register_inmem_page(struct inode *inode, struct page *page) 17088b88a66SJaegeuk Kim { 17188b88a66SJaegeuk Kim struct f2fs_inode_info *fi = F2FS_I(inode); 17288b88a66SJaegeuk Kim struct inmem_pages *new; 1739be32d72SJaegeuk Kim 1749e4ded3fSJaegeuk Kim f2fs_trace_pid(page); 1750722b101SJaegeuk Kim 176decd36b6SChao Yu set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE); 177decd36b6SChao Yu SetPagePrivate(page); 178decd36b6SChao Yu 17988b88a66SJaegeuk Kim new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS); 18088b88a66SJaegeuk Kim 18188b88a66SJaegeuk Kim /* add atomic page indices to the list */ 18288b88a66SJaegeuk Kim new->page = page; 18388b88a66SJaegeuk Kim INIT_LIST_HEAD(&new->list); 
184decd36b6SChao Yu 18588b88a66SJaegeuk Kim /* increase reference count with clean state */ 18688b88a66SJaegeuk Kim mutex_lock(&fi->inmem_lock); 18788b88a66SJaegeuk Kim get_page(page); 18888b88a66SJaegeuk Kim list_add_tail(&new->list, &fi->inmem_pages); 1898dcf2ff7SJaegeuk Kim inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES); 19088b88a66SJaegeuk Kim mutex_unlock(&fi->inmem_lock); 1918ce67cb0SJaegeuk Kim 1928ce67cb0SJaegeuk Kim trace_f2fs_register_inmem_page(page, INMEM); 19388b88a66SJaegeuk Kim } 19488b88a66SJaegeuk Kim 19528bc106bSChao Yu static int __revoke_inmem_pages(struct inode *inode, 19628bc106bSChao Yu struct list_head *head, bool drop, bool recover) 19729b96b54SChao Yu { 19828bc106bSChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 19929b96b54SChao Yu struct inmem_pages *cur, *tmp; 20028bc106bSChao Yu int err = 0; 20129b96b54SChao Yu 20229b96b54SChao Yu list_for_each_entry_safe(cur, tmp, head, list) { 20328bc106bSChao Yu struct page *page = cur->page; 20429b96b54SChao Yu 20528bc106bSChao Yu if (drop) 20628bc106bSChao Yu trace_f2fs_commit_inmem_page(page, INMEM_DROP); 20728bc106bSChao Yu 20828bc106bSChao Yu lock_page(page); 20928bc106bSChao Yu 21028bc106bSChao Yu if (recover) { 21128bc106bSChao Yu struct dnode_of_data dn; 21228bc106bSChao Yu struct node_info ni; 21328bc106bSChao Yu 21428bc106bSChao Yu trace_f2fs_commit_inmem_page(page, INMEM_REVOKE); 21528bc106bSChao Yu 21628bc106bSChao Yu set_new_dnode(&dn, inode, NULL, NULL, 0); 21728bc106bSChao Yu if (get_dnode_of_data(&dn, page->index, LOOKUP_NODE)) { 21828bc106bSChao Yu err = -EAGAIN; 21928bc106bSChao Yu goto next; 22028bc106bSChao Yu } 22128bc106bSChao Yu get_node_info(sbi, dn.nid, &ni); 22228bc106bSChao Yu f2fs_replace_block(sbi, &dn, dn.data_blkaddr, 22328bc106bSChao Yu cur->old_addr, ni.version, true, true); 22428bc106bSChao Yu f2fs_put_dnode(&dn); 22528bc106bSChao Yu } 22628bc106bSChao Yu next: 22763c52d78SJaegeuk Kim /* we don't need to invalidate this in the sccessful status */ 
22863c52d78SJaegeuk Kim if (drop || recover) 22928bc106bSChao Yu ClearPageUptodate(page); 23028bc106bSChao Yu set_page_private(page, 0); 231c81ced05SChao Yu ClearPagePrivate(page); 23228bc106bSChao Yu f2fs_put_page(page, 1); 23329b96b54SChao Yu 23429b96b54SChao Yu list_del(&cur->list); 23529b96b54SChao Yu kmem_cache_free(inmem_entry_slab, cur); 23629b96b54SChao Yu dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES); 23729b96b54SChao Yu } 23828bc106bSChao Yu return err; 23929b96b54SChao Yu } 24029b96b54SChao Yu 24129b96b54SChao Yu void drop_inmem_pages(struct inode *inode) 24229b96b54SChao Yu { 24329b96b54SChao Yu struct f2fs_inode_info *fi = F2FS_I(inode); 24429b96b54SChao Yu 24529b96b54SChao Yu mutex_lock(&fi->inmem_lock); 24628bc106bSChao Yu __revoke_inmem_pages(inode, &fi->inmem_pages, true, false); 24729b96b54SChao Yu mutex_unlock(&fi->inmem_lock); 2485fe45743SChao Yu 2495fe45743SChao Yu clear_inode_flag(inode, FI_ATOMIC_FILE); 2505fe45743SChao Yu stat_dec_atomic_write(inode); 25129b96b54SChao Yu } 25229b96b54SChao Yu 2538c242db9SJaegeuk Kim void drop_inmem_page(struct inode *inode, struct page *page) 2548c242db9SJaegeuk Kim { 2558c242db9SJaegeuk Kim struct f2fs_inode_info *fi = F2FS_I(inode); 2568c242db9SJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2578c242db9SJaegeuk Kim struct list_head *head = &fi->inmem_pages; 2588c242db9SJaegeuk Kim struct inmem_pages *cur = NULL; 2598c242db9SJaegeuk Kim 2608c242db9SJaegeuk Kim f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page)); 2618c242db9SJaegeuk Kim 2628c242db9SJaegeuk Kim mutex_lock(&fi->inmem_lock); 2638c242db9SJaegeuk Kim list_for_each_entry(cur, head, list) { 2648c242db9SJaegeuk Kim if (cur->page == page) 2658c242db9SJaegeuk Kim break; 2668c242db9SJaegeuk Kim } 2678c242db9SJaegeuk Kim 2688c242db9SJaegeuk Kim f2fs_bug_on(sbi, !cur || cur->page != page); 2698c242db9SJaegeuk Kim list_del(&cur->list); 2708c242db9SJaegeuk Kim mutex_unlock(&fi->inmem_lock); 2718c242db9SJaegeuk Kim 2728c242db9SJaegeuk Kim 
dec_page_count(sbi, F2FS_INMEM_PAGES); 2738c242db9SJaegeuk Kim kmem_cache_free(inmem_entry_slab, cur); 2748c242db9SJaegeuk Kim 2758c242db9SJaegeuk Kim ClearPageUptodate(page); 2768c242db9SJaegeuk Kim set_page_private(page, 0); 2778c242db9SJaegeuk Kim ClearPagePrivate(page); 2788c242db9SJaegeuk Kim f2fs_put_page(page, 0); 2798c242db9SJaegeuk Kim 2808c242db9SJaegeuk Kim trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE); 2818c242db9SJaegeuk Kim } 2828c242db9SJaegeuk Kim 28328bc106bSChao Yu static int __commit_inmem_pages(struct inode *inode, 28428bc106bSChao Yu struct list_head *revoke_list) 28588b88a66SJaegeuk Kim { 28688b88a66SJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 28788b88a66SJaegeuk Kim struct f2fs_inode_info *fi = F2FS_I(inode); 28888b88a66SJaegeuk Kim struct inmem_pages *cur, *tmp; 28988b88a66SJaegeuk Kim struct f2fs_io_info fio = { 29005ca3632SJaegeuk Kim .sbi = sbi, 29188b88a66SJaegeuk Kim .type = DATA, 29204d328deSMike Christie .op = REQ_OP_WRITE, 29370fd7614SChristoph Hellwig .op_flags = REQ_SYNC | REQ_PRIO, 29488b88a66SJaegeuk Kim }; 295942fd319SJaegeuk Kim pgoff_t last_idx = ULONG_MAX; 296edb27deeSJaegeuk Kim int err = 0; 29788b88a66SJaegeuk Kim 29888b88a66SJaegeuk Kim list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) { 29928bc106bSChao Yu struct page *page = cur->page; 30028bc106bSChao Yu 30128bc106bSChao Yu lock_page(page); 30228bc106bSChao Yu if (page->mapping == inode->i_mapping) { 30328bc106bSChao Yu trace_f2fs_commit_inmem_page(page, INMEM); 30428bc106bSChao Yu 30528bc106bSChao Yu set_page_dirty(page); 30628bc106bSChao Yu f2fs_wait_on_page_writeback(page, DATA, true); 307933439c8SChao Yu if (clear_page_dirty_for_io(page)) { 30888b88a66SJaegeuk Kim inode_dec_dirty_pages(inode); 309933439c8SChao Yu remove_dirty_inode(inode); 310933439c8SChao Yu } 31128bc106bSChao Yu 31228bc106bSChao Yu fio.page = page; 313e959c8f5SHou Pengyang fio.old_blkaddr = NULL_ADDR; 3144d978078SJaegeuk Kim fio.encrypted_page = NULL; 
315edb27deeSJaegeuk Kim err = do_write_data_page(&fio); 316edb27deeSJaegeuk Kim if (err) { 31728bc106bSChao Yu unlock_page(page); 318edb27deeSJaegeuk Kim break; 319edb27deeSJaegeuk Kim } 32028bc106bSChao Yu 32128bc106bSChao Yu /* record old blkaddr for revoking */ 32228bc106bSChao Yu cur->old_addr = fio.old_blkaddr; 323942fd319SJaegeuk Kim last_idx = page->index; 32488b88a66SJaegeuk Kim } 32528bc106bSChao Yu unlock_page(page); 32628bc106bSChao Yu list_move_tail(&cur->list, revoke_list); 32788b88a66SJaegeuk Kim } 32829b96b54SChao Yu 329942fd319SJaegeuk Kim if (last_idx != ULONG_MAX) 330942fd319SJaegeuk Kim f2fs_submit_merged_bio_cond(sbi, inode, 0, last_idx, 331942fd319SJaegeuk Kim DATA, WRITE); 33228bc106bSChao Yu 33328bc106bSChao Yu if (!err) 33428bc106bSChao Yu __revoke_inmem_pages(inode, revoke_list, false, false); 33528bc106bSChao Yu 33629b96b54SChao Yu return err; 33729b96b54SChao Yu } 33829b96b54SChao Yu 33929b96b54SChao Yu int commit_inmem_pages(struct inode *inode) 34029b96b54SChao Yu { 34129b96b54SChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 34229b96b54SChao Yu struct f2fs_inode_info *fi = F2FS_I(inode); 34328bc106bSChao Yu struct list_head revoke_list; 34428bc106bSChao Yu int err; 34529b96b54SChao Yu 34628bc106bSChao Yu INIT_LIST_HEAD(&revoke_list); 34729b96b54SChao Yu f2fs_balance_fs(sbi, true); 34829b96b54SChao Yu f2fs_lock_op(sbi); 34929b96b54SChao Yu 3505fe45743SChao Yu set_inode_flag(inode, FI_ATOMIC_COMMIT); 3515fe45743SChao Yu 35229b96b54SChao Yu mutex_lock(&fi->inmem_lock); 35328bc106bSChao Yu err = __commit_inmem_pages(inode, &revoke_list); 35428bc106bSChao Yu if (err) { 35528bc106bSChao Yu int ret; 35628bc106bSChao Yu /* 35728bc106bSChao Yu * try to revoke all committed pages, but still we could fail 35828bc106bSChao Yu * due to no memory or other reason, if that happened, EAGAIN 35928bc106bSChao Yu * will be returned, which means in such case, transaction is 36028bc106bSChao Yu * already not integrity, caller should use journal to do 
the 36128bc106bSChao Yu * recovery or rewrite & commit last transaction. For other 36228bc106bSChao Yu * error number, revoking was done by filesystem itself. 36328bc106bSChao Yu */ 36428bc106bSChao Yu ret = __revoke_inmem_pages(inode, &revoke_list, false, true); 36528bc106bSChao Yu if (ret) 36628bc106bSChao Yu err = ret; 36728bc106bSChao Yu 36828bc106bSChao Yu /* drop all uncommitted pages */ 36928bc106bSChao Yu __revoke_inmem_pages(inode, &fi->inmem_pages, true, false); 37028bc106bSChao Yu } 37188b88a66SJaegeuk Kim mutex_unlock(&fi->inmem_lock); 37288b88a66SJaegeuk Kim 3735fe45743SChao Yu clear_inode_flag(inode, FI_ATOMIC_COMMIT); 3745fe45743SChao Yu 37588b88a66SJaegeuk Kim f2fs_unlock_op(sbi); 376edb27deeSJaegeuk Kim return err; 37788b88a66SJaegeuk Kim } 37888b88a66SJaegeuk Kim 3790a8165d7SJaegeuk Kim /* 380351df4b2SJaegeuk Kim * This function balances dirty node and dentry pages. 381351df4b2SJaegeuk Kim * In addition, it controls garbage collection. 382351df4b2SJaegeuk Kim */ 3832c4db1a6SJaegeuk Kim void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need) 384351df4b2SJaegeuk Kim { 3850f348028SChao Yu #ifdef CONFIG_F2FS_FAULT_INJECTION 38655523519SChao Yu if (time_to_inject(sbi, FAULT_CHECKPOINT)) { 38755523519SChao Yu f2fs_show_injection_info(FAULT_CHECKPOINT); 3880f348028SChao Yu f2fs_stop_checkpoint(sbi, false); 38955523519SChao Yu } 3900f348028SChao Yu #endif 3910f348028SChao Yu 392e589c2c4SJaegeuk Kim /* balance_fs_bg is able to be pending */ 393a7881893SJaegeuk Kim if (need && excess_cached_nats(sbi)) 394e589c2c4SJaegeuk Kim f2fs_balance_fs_bg(sbi); 395e589c2c4SJaegeuk Kim 396351df4b2SJaegeuk Kim /* 397029cd28cSJaegeuk Kim * We should do GC or end up with checkpoint, if there are so many dirty 398029cd28cSJaegeuk Kim * dir/node pages without enough free segments. 
399351df4b2SJaegeuk Kim */ 4007f3037a5SJaegeuk Kim if (has_not_enough_free_secs(sbi, 0, 0)) { 401351df4b2SJaegeuk Kim mutex_lock(&sbi->gc_mutex); 402e066b83cSJaegeuk Kim f2fs_gc(sbi, false, false, NULL_SEGNO); 403351df4b2SJaegeuk Kim } 404351df4b2SJaegeuk Kim } 405351df4b2SJaegeuk Kim 4064660f9c0SJaegeuk Kim void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) 4074660f9c0SJaegeuk Kim { 4081dcc336bSChao Yu /* try to shrink extent cache when there is no enough memory */ 409554df79eSJaegeuk Kim if (!available_free_memory(sbi, EXTENT_CACHE)) 4101dcc336bSChao Yu f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER); 4111dcc336bSChao Yu 4121b38dc8eSJaegeuk Kim /* check the # of cached NAT entries */ 4131b38dc8eSJaegeuk Kim if (!available_free_memory(sbi, NAT_ENTRIES)) 4141b38dc8eSJaegeuk Kim try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK); 4151b38dc8eSJaegeuk Kim 41631696580SChao Yu if (!available_free_memory(sbi, FREE_NIDS)) 417ad4edb83SJaegeuk Kim try_to_free_nids(sbi, MAX_FREE_NIDS); 418ad4edb83SJaegeuk Kim else 41922ad0b6aSJaegeuk Kim build_free_nids(sbi, false, false); 42031696580SChao Yu 421f455c8a5SJaegeuk Kim if (!is_idle(sbi)) 422f455c8a5SJaegeuk Kim return; 423e5e7ea3cSJaegeuk Kim 42488a70a69SJaegeuk Kim /* checkpoint is the only way to shrink partial cached entries */ 4254660f9c0SJaegeuk Kim if (!available_free_memory(sbi, NAT_ENTRIES) || 42660b99b48SJaegeuk Kim !available_free_memory(sbi, INO_ENTRIES) || 4277d768d2cSChao Yu excess_prefree_segs(sbi) || 4287d768d2cSChao Yu excess_dirty_nats(sbi) || 429f455c8a5SJaegeuk Kim f2fs_time_over(sbi, CP_TIME)) { 430e9f5b8b8SChao Yu if (test_opt(sbi, DATA_FLUSH)) { 431e9f5b8b8SChao Yu struct blk_plug plug; 432e9f5b8b8SChao Yu 433e9f5b8b8SChao Yu blk_start_plug(&plug); 43436b35a0dSChao Yu sync_dirty_inodes(sbi, FILE_INODE); 435e9f5b8b8SChao Yu blk_finish_plug(&plug); 436e9f5b8b8SChao Yu } 4374660f9c0SJaegeuk Kim f2fs_sync_fs(sbi->sb, true); 43842190d2aSJaegeuk Kim stat_inc_bg_cp_count(sbi->stat_info); 4394660f9c0SJaegeuk Kim } 
44036b35a0dSChao Yu } 4414660f9c0SJaegeuk Kim 44220fda56bSKinglong Mee static int __submit_flush_wait(struct f2fs_sb_info *sbi, 44320fda56bSKinglong Mee struct block_device *bdev) 4443c62be17SJaegeuk Kim { 4453c62be17SJaegeuk Kim struct bio *bio = f2fs_bio_alloc(0); 4463c62be17SJaegeuk Kim int ret; 4473c62be17SJaegeuk Kim 44809cb6464SLinus Torvalds bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 4493c62be17SJaegeuk Kim bio->bi_bdev = bdev; 4503c62be17SJaegeuk Kim ret = submit_bio_wait(bio); 4513c62be17SJaegeuk Kim bio_put(bio); 45220fda56bSKinglong Mee 45320fda56bSKinglong Mee trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER), 45420fda56bSKinglong Mee test_opt(sbi, FLUSH_MERGE), ret); 4553c62be17SJaegeuk Kim return ret; 4563c62be17SJaegeuk Kim } 4573c62be17SJaegeuk Kim 4583c62be17SJaegeuk Kim static int submit_flush_wait(struct f2fs_sb_info *sbi) 4593c62be17SJaegeuk Kim { 46020fda56bSKinglong Mee int ret = __submit_flush_wait(sbi, sbi->sb->s_bdev); 4613c62be17SJaegeuk Kim int i; 4623c62be17SJaegeuk Kim 46320fda56bSKinglong Mee if (!sbi->s_ndevs || ret) 46420fda56bSKinglong Mee return ret; 46520fda56bSKinglong Mee 4663c62be17SJaegeuk Kim for (i = 1; i < sbi->s_ndevs; i++) { 46720fda56bSKinglong Mee ret = __submit_flush_wait(sbi, FDEV(i).bdev); 4683c62be17SJaegeuk Kim if (ret) 4693c62be17SJaegeuk Kim break; 4703c62be17SJaegeuk Kim } 4713c62be17SJaegeuk Kim return ret; 4723c62be17SJaegeuk Kim } 4733c62be17SJaegeuk Kim 4742163d198SGu Zheng static int issue_flush_thread(void *data) 4756b4afdd7SJaegeuk Kim { 4766b4afdd7SJaegeuk Kim struct f2fs_sb_info *sbi = data; 477b01a9201SJaegeuk Kim struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; 478a688b9d9SGu Zheng wait_queue_head_t *q = &fcc->flush_wait_queue; 4796b4afdd7SJaegeuk Kim repeat: 4806b4afdd7SJaegeuk Kim if (kthread_should_stop()) 4816b4afdd7SJaegeuk Kim return 0; 4826b4afdd7SJaegeuk Kim 483721bd4d5SGu Zheng if (!llist_empty(&fcc->issue_list)) { 4846b4afdd7SJaegeuk Kim struct flush_cmd *cmd, *next; 
4856b4afdd7SJaegeuk Kim int ret; 4866b4afdd7SJaegeuk Kim 487721bd4d5SGu Zheng fcc->dispatch_list = llist_del_all(&fcc->issue_list); 488721bd4d5SGu Zheng fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list); 489721bd4d5SGu Zheng 4903c62be17SJaegeuk Kim ret = submit_flush_wait(sbi); 4918b8dd65fSChao Yu atomic_inc(&fcc->issued_flush); 4928b8dd65fSChao Yu 493721bd4d5SGu Zheng llist_for_each_entry_safe(cmd, next, 494721bd4d5SGu Zheng fcc->dispatch_list, llnode) { 4956b4afdd7SJaegeuk Kim cmd->ret = ret; 4966b4afdd7SJaegeuk Kim complete(&cmd->wait); 4976b4afdd7SJaegeuk Kim } 498a688b9d9SGu Zheng fcc->dispatch_list = NULL; 4996b4afdd7SJaegeuk Kim } 5006b4afdd7SJaegeuk Kim 501a688b9d9SGu Zheng wait_event_interruptible(*q, 502721bd4d5SGu Zheng kthread_should_stop() || !llist_empty(&fcc->issue_list)); 5036b4afdd7SJaegeuk Kim goto repeat; 5046b4afdd7SJaegeuk Kim } 5056b4afdd7SJaegeuk Kim 5066b4afdd7SJaegeuk Kim int f2fs_issue_flush(struct f2fs_sb_info *sbi) 5076b4afdd7SJaegeuk Kim { 508b01a9201SJaegeuk Kim struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; 509adf8d90bSChao Yu struct flush_cmd cmd; 5108b8dd65fSChao Yu int ret; 5116b4afdd7SJaegeuk Kim 5120f7b2abdSJaegeuk Kim if (test_opt(sbi, NOBARRIER)) 5130f7b2abdSJaegeuk Kim return 0; 5140f7b2abdSJaegeuk Kim 5158b8dd65fSChao Yu if (!test_opt(sbi, FLUSH_MERGE)) { 5163c62be17SJaegeuk Kim ret = submit_flush_wait(sbi); 5178b8dd65fSChao Yu atomic_inc(&fcc->issued_flush); 5188b8dd65fSChao Yu return ret; 5198b8dd65fSChao Yu } 5208b8dd65fSChao Yu 5218b8dd65fSChao Yu if (!atomic_read(&fcc->issing_flush)) { 5228b8dd65fSChao Yu atomic_inc(&fcc->issing_flush); 5238b8dd65fSChao Yu ret = submit_flush_wait(sbi); 5248b8dd65fSChao Yu atomic_dec(&fcc->issing_flush); 5258b8dd65fSChao Yu 5268b8dd65fSChao Yu atomic_inc(&fcc->issued_flush); 527740432f8SJaegeuk Kim return ret; 528740432f8SJaegeuk Kim } 5296b4afdd7SJaegeuk Kim 530adf8d90bSChao Yu init_completion(&cmd.wait); 5316b4afdd7SJaegeuk Kim 5328b8dd65fSChao Yu 
atomic_inc(&fcc->issing_flush); 533721bd4d5SGu Zheng llist_add(&cmd.llnode, &fcc->issue_list); 5346b4afdd7SJaegeuk Kim 535a688b9d9SGu Zheng if (!fcc->dispatch_list) 536a688b9d9SGu Zheng wake_up(&fcc->flush_wait_queue); 5376b4afdd7SJaegeuk Kim 5385eba8c5dSJaegeuk Kim if (fcc->f2fs_issue_flush) { 539adf8d90bSChao Yu wait_for_completion(&cmd.wait); 5408b8dd65fSChao Yu atomic_dec(&fcc->issing_flush); 5415eba8c5dSJaegeuk Kim } else { 5425eba8c5dSJaegeuk Kim llist_del_all(&fcc->issue_list); 5438b8dd65fSChao Yu atomic_set(&fcc->issing_flush, 0); 5445eba8c5dSJaegeuk Kim } 545adf8d90bSChao Yu 546adf8d90bSChao Yu return cmd.ret; 5476b4afdd7SJaegeuk Kim } 5486b4afdd7SJaegeuk Kim 5492163d198SGu Zheng int create_flush_cmd_control(struct f2fs_sb_info *sbi) 5502163d198SGu Zheng { 5512163d198SGu Zheng dev_t dev = sbi->sb->s_bdev->bd_dev; 5522163d198SGu Zheng struct flush_cmd_control *fcc; 5532163d198SGu Zheng int err = 0; 5542163d198SGu Zheng 555b01a9201SJaegeuk Kim if (SM_I(sbi)->fcc_info) { 556b01a9201SJaegeuk Kim fcc = SM_I(sbi)->fcc_info; 5575eba8c5dSJaegeuk Kim goto init_thread; 5585eba8c5dSJaegeuk Kim } 5595eba8c5dSJaegeuk Kim 5602163d198SGu Zheng fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL); 5612163d198SGu Zheng if (!fcc) 5622163d198SGu Zheng return -ENOMEM; 5638b8dd65fSChao Yu atomic_set(&fcc->issued_flush, 0); 5648b8dd65fSChao Yu atomic_set(&fcc->issing_flush, 0); 5652163d198SGu Zheng init_waitqueue_head(&fcc->flush_wait_queue); 566721bd4d5SGu Zheng init_llist_head(&fcc->issue_list); 567b01a9201SJaegeuk Kim SM_I(sbi)->fcc_info = fcc; 5685eba8c5dSJaegeuk Kim init_thread: 5692163d198SGu Zheng fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi, 5702163d198SGu Zheng "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev)); 5712163d198SGu Zheng if (IS_ERR(fcc->f2fs_issue_flush)) { 5722163d198SGu Zheng err = PTR_ERR(fcc->f2fs_issue_flush); 5732163d198SGu Zheng kfree(fcc); 574b01a9201SJaegeuk Kim SM_I(sbi)->fcc_info = NULL; 5752163d198SGu Zheng return err; 
5762163d198SGu Zheng } 5772163d198SGu Zheng 5782163d198SGu Zheng return err; 5792163d198SGu Zheng } 5802163d198SGu Zheng 5815eba8c5dSJaegeuk Kim void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free) 5822163d198SGu Zheng { 583b01a9201SJaegeuk Kim struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; 5842163d198SGu Zheng 5855eba8c5dSJaegeuk Kim if (fcc && fcc->f2fs_issue_flush) { 5865eba8c5dSJaegeuk Kim struct task_struct *flush_thread = fcc->f2fs_issue_flush; 5875eba8c5dSJaegeuk Kim 5885eba8c5dSJaegeuk Kim fcc->f2fs_issue_flush = NULL; 5895eba8c5dSJaegeuk Kim kthread_stop(flush_thread); 5905eba8c5dSJaegeuk Kim } 5915eba8c5dSJaegeuk Kim if (free) { 5922163d198SGu Zheng kfree(fcc); 593b01a9201SJaegeuk Kim SM_I(sbi)->fcc_info = NULL; 5942163d198SGu Zheng } 5955eba8c5dSJaegeuk Kim } 5962163d198SGu Zheng 597351df4b2SJaegeuk Kim static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, 598351df4b2SJaegeuk Kim enum dirty_type dirty_type) 599351df4b2SJaegeuk Kim { 600351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 601351df4b2SJaegeuk Kim 602351df4b2SJaegeuk Kim /* need not be added */ 603351df4b2SJaegeuk Kim if (IS_CURSEG(sbi, segno)) 604351df4b2SJaegeuk Kim return; 605351df4b2SJaegeuk Kim 606351df4b2SJaegeuk Kim if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type])) 607351df4b2SJaegeuk Kim dirty_i->nr_dirty[dirty_type]++; 608351df4b2SJaegeuk Kim 609351df4b2SJaegeuk Kim if (dirty_type == DIRTY) { 610351df4b2SJaegeuk Kim struct seg_entry *sentry = get_seg_entry(sbi, segno); 6114625d6aaSChangman Lee enum dirty_type t = sentry->type; 612b2f2c390SJaegeuk Kim 613ec325b52SJaegeuk Kim if (unlikely(t >= DIRTY)) { 614ec325b52SJaegeuk Kim f2fs_bug_on(sbi, 1); 615ec325b52SJaegeuk Kim return; 616ec325b52SJaegeuk Kim } 6174625d6aaSChangman Lee if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t])) 6184625d6aaSChangman Lee dirty_i->nr_dirty[t]++; 619351df4b2SJaegeuk Kim } 620351df4b2SJaegeuk Kim } 621351df4b2SJaegeuk 
Kim 622351df4b2SJaegeuk Kim static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, 623351df4b2SJaegeuk Kim enum dirty_type dirty_type) 624351df4b2SJaegeuk Kim { 625351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 626351df4b2SJaegeuk Kim 627351df4b2SJaegeuk Kim if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type])) 628351df4b2SJaegeuk Kim dirty_i->nr_dirty[dirty_type]--; 629351df4b2SJaegeuk Kim 630351df4b2SJaegeuk Kim if (dirty_type == DIRTY) { 6314625d6aaSChangman Lee struct seg_entry *sentry = get_seg_entry(sbi, segno); 6324625d6aaSChangman Lee enum dirty_type t = sentry->type; 633b2f2c390SJaegeuk Kim 6344625d6aaSChangman Lee if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t])) 635b2f2c390SJaegeuk Kim dirty_i->nr_dirty[t]--; 636b2f2c390SJaegeuk Kim 637302bd348SJaegeuk Kim if (get_valid_blocks(sbi, segno, true) == 0) 6384ddb1a4dSJaegeuk Kim clear_bit(GET_SEC_FROM_SEG(sbi, segno), 6395ec4e49fSJaegeuk Kim dirty_i->victim_secmap); 640351df4b2SJaegeuk Kim } 641351df4b2SJaegeuk Kim } 642351df4b2SJaegeuk Kim 6430a8165d7SJaegeuk Kim /* 644351df4b2SJaegeuk Kim * Should not occur error such as -ENOMEM. 645351df4b2SJaegeuk Kim * Adding dirty entry into seglist is not critical operation. 646351df4b2SJaegeuk Kim * If a given segment is one of current working segments, it won't be added. 
647351df4b2SJaegeuk Kim */ 6488d8451afSHaicheng Li static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) 649351df4b2SJaegeuk Kim { 650351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 651351df4b2SJaegeuk Kim unsigned short valid_blocks; 652351df4b2SJaegeuk Kim 653351df4b2SJaegeuk Kim if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno)) 654351df4b2SJaegeuk Kim return; 655351df4b2SJaegeuk Kim 656351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock); 657351df4b2SJaegeuk Kim 658302bd348SJaegeuk Kim valid_blocks = get_valid_blocks(sbi, segno, false); 659351df4b2SJaegeuk Kim 660351df4b2SJaegeuk Kim if (valid_blocks == 0) { 661351df4b2SJaegeuk Kim __locate_dirty_segment(sbi, segno, PRE); 662351df4b2SJaegeuk Kim __remove_dirty_segment(sbi, segno, DIRTY); 663351df4b2SJaegeuk Kim } else if (valid_blocks < sbi->blocks_per_seg) { 664351df4b2SJaegeuk Kim __locate_dirty_segment(sbi, segno, DIRTY); 665351df4b2SJaegeuk Kim } else { 666351df4b2SJaegeuk Kim /* Recovery routine with SSR needs this */ 667351df4b2SJaegeuk Kim __remove_dirty_segment(sbi, segno, DIRTY); 668351df4b2SJaegeuk Kim } 669351df4b2SJaegeuk Kim 670351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock); 671351df4b2SJaegeuk Kim } 672351df4b2SJaegeuk Kim 673004b6862SChao Yu static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi, 674c81abe34SJaegeuk Kim struct block_device *bdev, block_t lstart, 675c81abe34SJaegeuk Kim block_t start, block_t len) 676275b66b0SChao Yu { 6770b54fb84SJaegeuk Kim struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 678ba48a33eSChao Yu struct list_head *pend_list; 679b01a9201SJaegeuk Kim struct discard_cmd *dc; 680275b66b0SChao Yu 681ba48a33eSChao Yu f2fs_bug_on(sbi, !len); 682ba48a33eSChao Yu 683ba48a33eSChao Yu pend_list = &dcc->pend_list[plist_idx(len)]; 684ba48a33eSChao Yu 685b01a9201SJaegeuk Kim dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS); 686b01a9201SJaegeuk Kim INIT_LIST_HEAD(&dc->list); 
687c81abe34SJaegeuk Kim dc->bdev = bdev; 688b01a9201SJaegeuk Kim dc->lstart = lstart; 689c81abe34SJaegeuk Kim dc->start = start; 690b01a9201SJaegeuk Kim dc->len = len; 691*ec9895adSChao Yu dc->ref = 0; 69215469963SJaegeuk Kim dc->state = D_PREP; 693c81abe34SJaegeuk Kim dc->error = 0; 694b01a9201SJaegeuk Kim init_completion(&dc->wait); 69522d375ddSChao Yu list_add_tail(&dc->list, pend_list); 6965f32366aSChao Yu atomic_inc(&dcc->discard_cmd_cnt); 697d84d1cbdSChao Yu dcc->undiscard_blks += len; 698004b6862SChao Yu 699004b6862SChao Yu return dc; 70015469963SJaegeuk Kim } 70115469963SJaegeuk Kim 702004b6862SChao Yu static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi, 703004b6862SChao Yu struct block_device *bdev, block_t lstart, 704004b6862SChao Yu block_t start, block_t len, 705004b6862SChao Yu struct rb_node *parent, struct rb_node **p) 706004b6862SChao Yu { 707004b6862SChao Yu struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 708004b6862SChao Yu struct discard_cmd *dc; 709004b6862SChao Yu 710004b6862SChao Yu dc = __create_discard_cmd(sbi, bdev, lstart, start, len); 711004b6862SChao Yu 712004b6862SChao Yu rb_link_node(&dc->rb_node, parent, p); 713004b6862SChao Yu rb_insert_color(&dc->rb_node, &dcc->root); 714004b6862SChao Yu 715004b6862SChao Yu return dc; 716004b6862SChao Yu } 717004b6862SChao Yu 718004b6862SChao Yu static void __detach_discard_cmd(struct discard_cmd_control *dcc, 719004b6862SChao Yu struct discard_cmd *dc) 72015469963SJaegeuk Kim { 721dcc9165dSJaegeuk Kim if (dc->state == D_DONE) 722004b6862SChao Yu atomic_dec(&dcc->issing_discard); 723004b6862SChao Yu 724004b6862SChao Yu list_del(&dc->list); 725004b6862SChao Yu rb_erase(&dc->rb_node, &dcc->root); 726d84d1cbdSChao Yu dcc->undiscard_blks -= dc->len; 727004b6862SChao Yu 728004b6862SChao Yu kmem_cache_free(discard_cmd_slab, dc); 729004b6862SChao Yu 730004b6862SChao Yu atomic_dec(&dcc->discard_cmd_cnt); 731004b6862SChao Yu } 732004b6862SChao Yu 733004b6862SChao Yu static void 
__remove_discard_cmd(struct f2fs_sb_info *sbi, 734004b6862SChao Yu struct discard_cmd *dc) 735004b6862SChao Yu { 736004b6862SChao Yu struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 737dcc9165dSJaegeuk Kim 738c81abe34SJaegeuk Kim if (dc->error == -EOPNOTSUPP) 739c81abe34SJaegeuk Kim dc->error = 0; 74015469963SJaegeuk Kim 741c81abe34SJaegeuk Kim if (dc->error) 74215469963SJaegeuk Kim f2fs_msg(sbi->sb, KERN_INFO, 743c81abe34SJaegeuk Kim "Issue discard failed, ret: %d", dc->error); 744004b6862SChao Yu __detach_discard_cmd(dcc, dc); 745275b66b0SChao Yu } 746275b66b0SChao Yu 747c81abe34SJaegeuk Kim static void f2fs_submit_discard_endio(struct bio *bio) 748c81abe34SJaegeuk Kim { 749c81abe34SJaegeuk Kim struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private; 750c81abe34SJaegeuk Kim 751c81abe34SJaegeuk Kim dc->error = bio->bi_error; 752c81abe34SJaegeuk Kim dc->state = D_DONE; 753fa64a003SChao Yu complete(&dc->wait); 754c81abe34SJaegeuk Kim bio_put(bio); 755c81abe34SJaegeuk Kim } 756c81abe34SJaegeuk Kim 757c81abe34SJaegeuk Kim /* this function is copied from blkdev_issue_discard from block/blk-lib.c */ 758c81abe34SJaegeuk Kim static void __submit_discard_cmd(struct f2fs_sb_info *sbi, 759c81abe34SJaegeuk Kim struct discard_cmd *dc) 760c81abe34SJaegeuk Kim { 761c81abe34SJaegeuk Kim struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 762c81abe34SJaegeuk Kim struct bio *bio = NULL; 763c81abe34SJaegeuk Kim 764c81abe34SJaegeuk Kim if (dc->state != D_PREP) 765c81abe34SJaegeuk Kim return; 766c81abe34SJaegeuk Kim 7670243a5f9SChao Yu trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len); 7680243a5f9SChao Yu 769c81abe34SJaegeuk Kim dc->error = __blkdev_issue_discard(dc->bdev, 770c81abe34SJaegeuk Kim SECTOR_FROM_BLOCK(dc->start), 771c81abe34SJaegeuk Kim SECTOR_FROM_BLOCK(dc->len), 772c81abe34SJaegeuk Kim GFP_NOFS, 0, &bio); 773c81abe34SJaegeuk Kim if (!dc->error) { 774c81abe34SJaegeuk Kim /* should keep before submission to avoid D_DONE right away */ 
775c81abe34SJaegeuk Kim dc->state = D_SUBMIT; 7768b8dd65fSChao Yu atomic_inc(&dcc->issued_discard); 7778b8dd65fSChao Yu atomic_inc(&dcc->issing_discard); 778c81abe34SJaegeuk Kim if (bio) { 779c81abe34SJaegeuk Kim bio->bi_private = dc; 780c81abe34SJaegeuk Kim bio->bi_end_io = f2fs_submit_discard_endio; 781c81abe34SJaegeuk Kim bio->bi_opf |= REQ_SYNC; 782c81abe34SJaegeuk Kim submit_bio(bio); 78346f84c2cSChao Yu list_move_tail(&dc->list, &dcc->wait_list); 784c81abe34SJaegeuk Kim } 785c81abe34SJaegeuk Kim } else { 786c81abe34SJaegeuk Kim __remove_discard_cmd(sbi, dc); 787c81abe34SJaegeuk Kim } 788c81abe34SJaegeuk Kim } 789c81abe34SJaegeuk Kim 790004b6862SChao Yu static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi, 791004b6862SChao Yu struct block_device *bdev, block_t lstart, 792004b6862SChao Yu block_t start, block_t len, 793004b6862SChao Yu struct rb_node **insert_p, 794004b6862SChao Yu struct rb_node *insert_parent) 795004b6862SChao Yu { 796004b6862SChao Yu struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 797004b6862SChao Yu struct rb_node **p = &dcc->root.rb_node; 798004b6862SChao Yu struct rb_node *parent = NULL; 799004b6862SChao Yu struct discard_cmd *dc = NULL; 800004b6862SChao Yu 801004b6862SChao Yu if (insert_p && insert_parent) { 802004b6862SChao Yu parent = insert_parent; 803004b6862SChao Yu p = insert_p; 804004b6862SChao Yu goto do_insert; 805004b6862SChao Yu } 806004b6862SChao Yu 807004b6862SChao Yu p = __lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart); 808004b6862SChao Yu do_insert: 809004b6862SChao Yu dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p); 810004b6862SChao Yu if (!dc) 811004b6862SChao Yu return NULL; 812004b6862SChao Yu 813004b6862SChao Yu return dc; 814004b6862SChao Yu } 815004b6862SChao Yu 816ba48a33eSChao Yu static void __relocate_discard_cmd(struct discard_cmd_control *dcc, 817ba48a33eSChao Yu struct discard_cmd *dc) 818ba48a33eSChao Yu { 819ba48a33eSChao Yu 
list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]); 820ba48a33eSChao Yu } 821ba48a33eSChao Yu 822004b6862SChao Yu static void __punch_discard_cmd(struct f2fs_sb_info *sbi, 823004b6862SChao Yu struct discard_cmd *dc, block_t blkaddr) 824004b6862SChao Yu { 825ba48a33eSChao Yu struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 826004b6862SChao Yu struct discard_info di = dc->di; 827004b6862SChao Yu bool modified = false; 828004b6862SChao Yu 829004b6862SChao Yu if (dc->state == D_DONE || dc->len == 1) { 830004b6862SChao Yu __remove_discard_cmd(sbi, dc); 831004b6862SChao Yu return; 832004b6862SChao Yu } 833004b6862SChao Yu 834d84d1cbdSChao Yu dcc->undiscard_blks -= di.len; 835d84d1cbdSChao Yu 836004b6862SChao Yu if (blkaddr > di.lstart) { 837004b6862SChao Yu dc->len = blkaddr - dc->lstart; 838d84d1cbdSChao Yu dcc->undiscard_blks += dc->len; 839ba48a33eSChao Yu __relocate_discard_cmd(dcc, dc); 840df0f6b44SChao Yu f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root)); 841004b6862SChao Yu modified = true; 842004b6862SChao Yu } 843004b6862SChao Yu 844004b6862SChao Yu if (blkaddr < di.lstart + di.len - 1) { 845004b6862SChao Yu if (modified) { 846004b6862SChao Yu __insert_discard_tree(sbi, dc->bdev, blkaddr + 1, 847004b6862SChao Yu di.start + blkaddr + 1 - di.lstart, 848004b6862SChao Yu di.lstart + di.len - 1 - blkaddr, 849004b6862SChao Yu NULL, NULL); 850df0f6b44SChao Yu f2fs_bug_on(sbi, 851df0f6b44SChao Yu !__check_rb_tree_consistence(sbi, &dcc->root)); 852004b6862SChao Yu } else { 853004b6862SChao Yu dc->lstart++; 854004b6862SChao Yu dc->len--; 855004b6862SChao Yu dc->start++; 856d84d1cbdSChao Yu dcc->undiscard_blks += dc->len; 857ba48a33eSChao Yu __relocate_discard_cmd(dcc, dc); 858df0f6b44SChao Yu f2fs_bug_on(sbi, 859df0f6b44SChao Yu !__check_rb_tree_consistence(sbi, &dcc->root)); 860004b6862SChao Yu } 861004b6862SChao Yu } 862004b6862SChao Yu } 863004b6862SChao Yu 864004b6862SChao Yu static void __update_discard_tree_range(struct f2fs_sb_info 
*sbi, 865004b6862SChao Yu struct block_device *bdev, block_t lstart, 866004b6862SChao Yu block_t start, block_t len) 867004b6862SChao Yu { 868004b6862SChao Yu struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 869004b6862SChao Yu struct discard_cmd *prev_dc = NULL, *next_dc = NULL; 870004b6862SChao Yu struct discard_cmd *dc; 871004b6862SChao Yu struct discard_info di = {0}; 872004b6862SChao Yu struct rb_node **insert_p = NULL, *insert_parent = NULL; 873004b6862SChao Yu block_t end = lstart + len; 874004b6862SChao Yu 875004b6862SChao Yu mutex_lock(&dcc->cmd_lock); 876004b6862SChao Yu 877004b6862SChao Yu dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root, 878004b6862SChao Yu NULL, lstart, 879004b6862SChao Yu (struct rb_entry **)&prev_dc, 880004b6862SChao Yu (struct rb_entry **)&next_dc, 881004b6862SChao Yu &insert_p, &insert_parent, true); 882004b6862SChao Yu if (dc) 883004b6862SChao Yu prev_dc = dc; 884004b6862SChao Yu 885004b6862SChao Yu if (!prev_dc) { 886004b6862SChao Yu di.lstart = lstart; 887004b6862SChao Yu di.len = next_dc ? 
next_dc->lstart - lstart : len; 888004b6862SChao Yu di.len = min(di.len, len); 889004b6862SChao Yu di.start = start; 890004b6862SChao Yu } 891004b6862SChao Yu 892004b6862SChao Yu while (1) { 893004b6862SChao Yu struct rb_node *node; 894004b6862SChao Yu bool merged = false; 895004b6862SChao Yu struct discard_cmd *tdc = NULL; 896004b6862SChao Yu 897004b6862SChao Yu if (prev_dc) { 898004b6862SChao Yu di.lstart = prev_dc->lstart + prev_dc->len; 899004b6862SChao Yu if (di.lstart < lstart) 900004b6862SChao Yu di.lstart = lstart; 901004b6862SChao Yu if (di.lstart >= end) 902004b6862SChao Yu break; 903004b6862SChao Yu 904004b6862SChao Yu if (!next_dc || next_dc->lstart > end) 905004b6862SChao Yu di.len = end - di.lstart; 906004b6862SChao Yu else 907004b6862SChao Yu di.len = next_dc->lstart - di.lstart; 908004b6862SChao Yu di.start = start + di.lstart - lstart; 909004b6862SChao Yu } 910004b6862SChao Yu 911004b6862SChao Yu if (!di.len) 912004b6862SChao Yu goto next; 913004b6862SChao Yu 914004b6862SChao Yu if (prev_dc && prev_dc->state == D_PREP && 915004b6862SChao Yu prev_dc->bdev == bdev && 916004b6862SChao Yu __is_discard_back_mergeable(&di, &prev_dc->di)) { 917004b6862SChao Yu prev_dc->di.len += di.len; 918d84d1cbdSChao Yu dcc->undiscard_blks += di.len; 919ba48a33eSChao Yu __relocate_discard_cmd(dcc, prev_dc); 920df0f6b44SChao Yu f2fs_bug_on(sbi, 921df0f6b44SChao Yu !__check_rb_tree_consistence(sbi, &dcc->root)); 922004b6862SChao Yu di = prev_dc->di; 923004b6862SChao Yu tdc = prev_dc; 924004b6862SChao Yu merged = true; 925004b6862SChao Yu } 926004b6862SChao Yu 927004b6862SChao Yu if (next_dc && next_dc->state == D_PREP && 928004b6862SChao Yu next_dc->bdev == bdev && 929004b6862SChao Yu __is_discard_front_mergeable(&di, &next_dc->di)) { 930004b6862SChao Yu next_dc->di.lstart = di.lstart; 931004b6862SChao Yu next_dc->di.len += di.len; 932004b6862SChao Yu next_dc->di.start = di.start; 933d84d1cbdSChao Yu dcc->undiscard_blks += di.len; 934ba48a33eSChao Yu 
__relocate_discard_cmd(dcc, next_dc); 935004b6862SChao Yu if (tdc) 936004b6862SChao Yu __remove_discard_cmd(sbi, tdc); 937df0f6b44SChao Yu f2fs_bug_on(sbi, 938df0f6b44SChao Yu !__check_rb_tree_consistence(sbi, &dcc->root)); 939004b6862SChao Yu merged = true; 940004b6862SChao Yu } 941004b6862SChao Yu 942df0f6b44SChao Yu if (!merged) { 943004b6862SChao Yu __insert_discard_tree(sbi, bdev, di.lstart, di.start, 944004b6862SChao Yu di.len, NULL, NULL); 945df0f6b44SChao Yu f2fs_bug_on(sbi, 946df0f6b44SChao Yu !__check_rb_tree_consistence(sbi, &dcc->root)); 947df0f6b44SChao Yu } 948004b6862SChao Yu next: 949004b6862SChao Yu prev_dc = next_dc; 950004b6862SChao Yu if (!prev_dc) 951004b6862SChao Yu break; 952004b6862SChao Yu 953004b6862SChao Yu node = rb_next(&prev_dc->rb_node); 954004b6862SChao Yu next_dc = rb_entry_safe(node, struct discard_cmd, rb_node); 955004b6862SChao Yu } 956004b6862SChao Yu 957004b6862SChao Yu mutex_unlock(&dcc->cmd_lock); 958004b6862SChao Yu } 959004b6862SChao Yu 960c81abe34SJaegeuk Kim static int __queue_discard_cmd(struct f2fs_sb_info *sbi, 961c81abe34SJaegeuk Kim struct block_device *bdev, block_t blkstart, block_t blklen) 962c81abe34SJaegeuk Kim { 963c81abe34SJaegeuk Kim block_t lblkstart = blkstart; 964c81abe34SJaegeuk Kim 9650243a5f9SChao Yu trace_f2fs_queue_discard(bdev, blkstart, blklen); 966c81abe34SJaegeuk Kim 967c81abe34SJaegeuk Kim if (sbi->s_ndevs) { 968c81abe34SJaegeuk Kim int devi = f2fs_target_device_index(sbi, blkstart); 969c81abe34SJaegeuk Kim 970c81abe34SJaegeuk Kim blkstart -= FDEV(devi).start_blk; 971c81abe34SJaegeuk Kim } 972004b6862SChao Yu __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen); 973c81abe34SJaegeuk Kim return 0; 974c81abe34SJaegeuk Kim } 975c81abe34SJaegeuk Kim 976bd5b0738SChao Yu static void __issue_discard_cmd(struct f2fs_sb_info *sbi, bool issue_cond) 977bd5b0738SChao Yu { 978bd5b0738SChao Yu struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 979bd5b0738SChao Yu struct list_head *pend_list; 
980bd5b0738SChao Yu struct discard_cmd *dc, *tmp; 981bd5b0738SChao Yu struct blk_plug plug; 982bd5b0738SChao Yu int i, iter = 0; 983bd5b0738SChao Yu 984bd5b0738SChao Yu mutex_lock(&dcc->cmd_lock); 985bd5b0738SChao Yu blk_start_plug(&plug); 986bd5b0738SChao Yu for (i = MAX_PLIST_NUM - 1; i >= 0; i--) { 987bd5b0738SChao Yu pend_list = &dcc->pend_list[i]; 988bd5b0738SChao Yu list_for_each_entry_safe(dc, tmp, pend_list, list) { 989bd5b0738SChao Yu f2fs_bug_on(sbi, dc->state != D_PREP); 990bd5b0738SChao Yu 991bd5b0738SChao Yu if (!issue_cond || is_idle(sbi)) 992bd5b0738SChao Yu __submit_discard_cmd(sbi, dc); 993bd5b0738SChao Yu if (issue_cond && iter++ > DISCARD_ISSUE_RATE) 994bd5b0738SChao Yu goto out; 995bd5b0738SChao Yu } 996bd5b0738SChao Yu } 997bd5b0738SChao Yu out: 998bd5b0738SChao Yu blk_finish_plug(&plug); 999bd5b0738SChao Yu mutex_unlock(&dcc->cmd_lock); 1000bd5b0738SChao Yu } 1001bd5b0738SChao Yu 100263a94fa1SChao Yu static void __wait_discard_cmd(struct f2fs_sb_info *sbi, bool wait_cond) 100363a94fa1SChao Yu { 100463a94fa1SChao Yu struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 100563a94fa1SChao Yu struct list_head *wait_list = &(dcc->wait_list); 100663a94fa1SChao Yu struct discard_cmd *dc, *tmp; 100763a94fa1SChao Yu 100863a94fa1SChao Yu mutex_lock(&dcc->cmd_lock); 100963a94fa1SChao Yu list_for_each_entry_safe(dc, tmp, wait_list, list) { 101063a94fa1SChao Yu if (!wait_cond || dc->state == D_DONE) { 1011*ec9895adSChao Yu if (dc->ref) 1012*ec9895adSChao Yu continue; 101363a94fa1SChao Yu wait_for_completion_io(&dc->wait); 101463a94fa1SChao Yu __remove_discard_cmd(sbi, dc); 101563a94fa1SChao Yu } 101663a94fa1SChao Yu } 101763a94fa1SChao Yu mutex_unlock(&dcc->cmd_lock); 101863a94fa1SChao Yu } 101963a94fa1SChao Yu 10204e6a8d9bSJaegeuk Kim /* This should be covered by global mutex, &sit_i->sentry_lock */ 10214e6a8d9bSJaegeuk Kim void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr) 1022275b66b0SChao Yu { 10230b54fb84SJaegeuk Kim struct 
discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 1024004b6862SChao Yu struct discard_cmd *dc; 1025*ec9895adSChao Yu bool need_wait = false; 1026275b66b0SChao Yu 102715469963SJaegeuk Kim mutex_lock(&dcc->cmd_lock); 1028004b6862SChao Yu dc = (struct discard_cmd *)__lookup_rb_tree(&dcc->root, NULL, blkaddr); 1029004b6862SChao Yu if (dc) { 1030*ec9895adSChao Yu if (dc->state == D_PREP) { 10313d6a650fSYunlei He __punch_discard_cmd(sbi, dc, blkaddr); 1032*ec9895adSChao Yu } else { 1033*ec9895adSChao Yu dc->ref++; 1034*ec9895adSChao Yu need_wait = true; 1035275b66b0SChao Yu } 1036*ec9895adSChao Yu } 1037d431413fSChao Yu mutex_unlock(&dcc->cmd_lock); 1038*ec9895adSChao Yu 1039*ec9895adSChao Yu if (need_wait) { 1040*ec9895adSChao Yu wait_for_completion_io(&dc->wait); 1041*ec9895adSChao Yu mutex_lock(&dcc->cmd_lock); 1042*ec9895adSChao Yu f2fs_bug_on(sbi, dc->state != D_DONE); 1043*ec9895adSChao Yu dc->ref--; 1044*ec9895adSChao Yu if (!dc->ref) 1045*ec9895adSChao Yu __remove_discard_cmd(sbi, dc); 1046*ec9895adSChao Yu mutex_unlock(&dcc->cmd_lock); 1047*ec9895adSChao Yu } 1048d431413fSChao Yu } 1049d431413fSChao Yu 1050d431413fSChao Yu /* This comes from f2fs_put_super */ 1051d431413fSChao Yu void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi) 1052d431413fSChao Yu { 1053bd5b0738SChao Yu __issue_discard_cmd(sbi, false); 105463a94fa1SChao Yu __wait_discard_cmd(sbi, false); 105515469963SJaegeuk Kim } 1056275b66b0SChao Yu 105715469963SJaegeuk Kim static int issue_discard_thread(void *data) 105815469963SJaegeuk Kim { 105915469963SJaegeuk Kim struct f2fs_sb_info *sbi = data; 106015469963SJaegeuk Kim struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 106115469963SJaegeuk Kim wait_queue_head_t *q = &dcc->discard_wait_queue; 106215469963SJaegeuk Kim repeat: 106315469963SJaegeuk Kim if (kthread_should_stop()) 106415469963SJaegeuk Kim return 0; 106515469963SJaegeuk Kim 1066bd5b0738SChao Yu __issue_discard_cmd(sbi, true); 106763a94fa1SChao Yu __wait_discard_cmd(sbi, true); 
106815469963SJaegeuk Kim 106915469963SJaegeuk Kim congestion_wait(BLK_RW_SYNC, HZ/50); 107015469963SJaegeuk Kim 107122d375ddSChao Yu wait_event_interruptible(*q, kthread_should_stop() || 1072ba48a33eSChao Yu atomic_read(&dcc->discard_cmd_cnt)); 107315469963SJaegeuk Kim goto repeat; 107415469963SJaegeuk Kim } 107515469963SJaegeuk Kim 1076f46e8809SDamien Le Moal #ifdef CONFIG_BLK_DEV_ZONED 10773c62be17SJaegeuk Kim static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi, 10783c62be17SJaegeuk Kim struct block_device *bdev, block_t blkstart, block_t blklen) 1079f46e8809SDamien Le Moal { 108092592285SJaegeuk Kim sector_t sector, nr_sects; 108110a875f8SKinglong Mee block_t lblkstart = blkstart; 10823c62be17SJaegeuk Kim int devi = 0; 1083f46e8809SDamien Le Moal 10843c62be17SJaegeuk Kim if (sbi->s_ndevs) { 10853c62be17SJaegeuk Kim devi = f2fs_target_device_index(sbi, blkstart); 10863c62be17SJaegeuk Kim blkstart -= FDEV(devi).start_blk; 10873c62be17SJaegeuk Kim } 1088f46e8809SDamien Le Moal 1089f46e8809SDamien Le Moal /* 1090f46e8809SDamien Le Moal * We need to know the type of the zone: for conventional zones, 1091f46e8809SDamien Le Moal * use regular discard if the drive supports it. For sequential 1092f46e8809SDamien Le Moal * zones, reset the zone write pointer. 
1093f46e8809SDamien Le Moal */ 10943c62be17SJaegeuk Kim switch (get_blkz_type(sbi, bdev, blkstart)) { 1095f46e8809SDamien Le Moal 1096f46e8809SDamien Le Moal case BLK_ZONE_TYPE_CONVENTIONAL: 1097f46e8809SDamien Le Moal if (!blk_queue_discard(bdev_get_queue(bdev))) 1098f46e8809SDamien Le Moal return 0; 1099c81abe34SJaegeuk Kim return __queue_discard_cmd(sbi, bdev, lblkstart, blklen); 1100f46e8809SDamien Le Moal case BLK_ZONE_TYPE_SEQWRITE_REQ: 1101f46e8809SDamien Le Moal case BLK_ZONE_TYPE_SEQWRITE_PREF: 110292592285SJaegeuk Kim sector = SECTOR_FROM_BLOCK(blkstart); 110392592285SJaegeuk Kim nr_sects = SECTOR_FROM_BLOCK(blklen); 110492592285SJaegeuk Kim 110592592285SJaegeuk Kim if (sector & (bdev_zone_sectors(bdev) - 1) || 110692592285SJaegeuk Kim nr_sects != bdev_zone_sectors(bdev)) { 110792592285SJaegeuk Kim f2fs_msg(sbi->sb, KERN_INFO, 110892592285SJaegeuk Kim "(%d) %s: Unaligned discard attempted (block %x + %x)", 110992592285SJaegeuk Kim devi, sbi->s_ndevs ? FDEV(devi).path: "", 111092592285SJaegeuk Kim blkstart, blklen); 111192592285SJaegeuk Kim return -EIO; 111292592285SJaegeuk Kim } 1113d50aaeecSJaegeuk Kim trace_f2fs_issue_reset_zone(bdev, blkstart); 1114f46e8809SDamien Le Moal return blkdev_reset_zones(bdev, sector, 1115f46e8809SDamien Le Moal nr_sects, GFP_NOFS); 1116f46e8809SDamien Le Moal default: 1117f46e8809SDamien Le Moal /* Unknown zone type: broken device ? 
*/ 1118f46e8809SDamien Le Moal return -EIO; 1119f46e8809SDamien Le Moal } 1120f46e8809SDamien Le Moal } 1121f46e8809SDamien Le Moal #endif 1122f46e8809SDamien Le Moal 11233c62be17SJaegeuk Kim static int __issue_discard_async(struct f2fs_sb_info *sbi, 11243c62be17SJaegeuk Kim struct block_device *bdev, block_t blkstart, block_t blklen) 11253c62be17SJaegeuk Kim { 11263c62be17SJaegeuk Kim #ifdef CONFIG_BLK_DEV_ZONED 11273c62be17SJaegeuk Kim if (f2fs_sb_mounted_blkzoned(sbi->sb) && 11283c62be17SJaegeuk Kim bdev_zoned_model(bdev) != BLK_ZONED_NONE) 11293c62be17SJaegeuk Kim return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen); 11303c62be17SJaegeuk Kim #endif 1131c81abe34SJaegeuk Kim return __queue_discard_cmd(sbi, bdev, blkstart, blklen); 11323c62be17SJaegeuk Kim } 11333c62be17SJaegeuk Kim 11341e87a78dSJaegeuk Kim static int f2fs_issue_discard(struct f2fs_sb_info *sbi, 113537208879SJaegeuk Kim block_t blkstart, block_t blklen) 113637208879SJaegeuk Kim { 11373c62be17SJaegeuk Kim sector_t start = blkstart, len = 0; 11383c62be17SJaegeuk Kim struct block_device *bdev; 1139a66cdd98SJaegeuk Kim struct seg_entry *se; 1140a66cdd98SJaegeuk Kim unsigned int offset; 1141a66cdd98SJaegeuk Kim block_t i; 11423c62be17SJaegeuk Kim int err = 0; 1143a66cdd98SJaegeuk Kim 11443c62be17SJaegeuk Kim bdev = f2fs_target_device(sbi, blkstart, NULL); 11453c62be17SJaegeuk Kim 11463c62be17SJaegeuk Kim for (i = blkstart; i < blkstart + blklen; i++, len++) { 11473c62be17SJaegeuk Kim if (i != start) { 11483c62be17SJaegeuk Kim struct block_device *bdev2 = 11493c62be17SJaegeuk Kim f2fs_target_device(sbi, i, NULL); 11503c62be17SJaegeuk Kim 11513c62be17SJaegeuk Kim if (bdev2 != bdev) { 11523c62be17SJaegeuk Kim err = __issue_discard_async(sbi, bdev, 11533c62be17SJaegeuk Kim start, len); 11543c62be17SJaegeuk Kim if (err) 11553c62be17SJaegeuk Kim return err; 11563c62be17SJaegeuk Kim bdev = bdev2; 11573c62be17SJaegeuk Kim start = i; 11583c62be17SJaegeuk Kim len = 0; 11593c62be17SJaegeuk Kim } 
11603c62be17SJaegeuk Kim } 11613c62be17SJaegeuk Kim 1162a66cdd98SJaegeuk Kim se = get_seg_entry(sbi, GET_SEGNO(sbi, i)); 1163a66cdd98SJaegeuk Kim offset = GET_BLKOFF_FROM_SEG0(sbi, i); 1164a66cdd98SJaegeuk Kim 1165a66cdd98SJaegeuk Kim if (!f2fs_test_and_set_bit(offset, se->discard_map)) 1166a66cdd98SJaegeuk Kim sbi->discard_blks--; 1167a66cdd98SJaegeuk Kim } 1168f46e8809SDamien Le Moal 11693c62be17SJaegeuk Kim if (len) 11703c62be17SJaegeuk Kim err = __issue_discard_async(sbi, bdev, start, len); 11713c62be17SJaegeuk Kim return err; 11721e87a78dSJaegeuk Kim } 11731e87a78dSJaegeuk Kim 117425290fa5SJaegeuk Kim static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc, 117525290fa5SJaegeuk Kim bool check_only) 1176adf4983bSJaegeuk Kim { 1177b2955550SJaegeuk Kim int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); 1178b2955550SJaegeuk Kim int max_blocks = sbi->blocks_per_seg; 11794b2fecc8SJaegeuk Kim struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start); 1180b2955550SJaegeuk Kim unsigned long *cur_map = (unsigned long *)se->cur_valid_map; 1181b2955550SJaegeuk Kim unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; 1182a66cdd98SJaegeuk Kim unsigned long *discard_map = (unsigned long *)se->discard_map; 118360a3b782SJaegeuk Kim unsigned long *dmap = SIT_I(sbi)->tmp_map; 1184b2955550SJaegeuk Kim unsigned int start = 0, end = -1; 11854b2fecc8SJaegeuk Kim bool force = (cpc->reason == CP_DISCARD); 1186a7eeb823SChao Yu struct discard_entry *de = NULL; 118746f84c2cSChao Yu struct list_head *head = &SM_I(sbi)->dcc_info->entry_list; 1188b2955550SJaegeuk Kim int i; 1189b2955550SJaegeuk Kim 11903e025740SJaegeuk Kim if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi)) 119125290fa5SJaegeuk Kim return false; 1192b2955550SJaegeuk Kim 1193a66cdd98SJaegeuk Kim if (!force) { 1194a66cdd98SJaegeuk Kim if (!test_opt(sbi, DISCARD) || !se->valid_blocks || 11950b54fb84SJaegeuk Kim SM_I(sbi)->dcc_info->nr_discards >= 11960b54fb84SJaegeuk Kim 
SM_I(sbi)->dcc_info->max_discards) 119725290fa5SJaegeuk Kim return false; 11984b2fecc8SJaegeuk Kim } 1199b2955550SJaegeuk Kim 1200b2955550SJaegeuk Kim /* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */ 1201b2955550SJaegeuk Kim for (i = 0; i < entries; i++) 1202a66cdd98SJaegeuk Kim dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] : 1203d7bc2484SJaegeuk Kim (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i]; 1204b2955550SJaegeuk Kim 12050b54fb84SJaegeuk Kim while (force || SM_I(sbi)->dcc_info->nr_discards <= 12060b54fb84SJaegeuk Kim SM_I(sbi)->dcc_info->max_discards) { 1207b2955550SJaegeuk Kim start = __find_rev_next_bit(dmap, max_blocks, end + 1); 1208b2955550SJaegeuk Kim if (start >= max_blocks) 1209b2955550SJaegeuk Kim break; 1210b2955550SJaegeuk Kim 1211b2955550SJaegeuk Kim end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1); 1212c7b41e16SYunlei He if (force && start && end != max_blocks 1213c7b41e16SYunlei He && (end - start) < cpc->trim_minlen) 1214c7b41e16SYunlei He continue; 1215c7b41e16SYunlei He 121625290fa5SJaegeuk Kim if (check_only) 121725290fa5SJaegeuk Kim return true; 121825290fa5SJaegeuk Kim 1219a7eeb823SChao Yu if (!de) { 1220a7eeb823SChao Yu de = f2fs_kmem_cache_alloc(discard_entry_slab, 1221a7eeb823SChao Yu GFP_F2FS_ZERO); 1222a7eeb823SChao Yu de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start); 1223a7eeb823SChao Yu list_add_tail(&de->list, head); 1224a7eeb823SChao Yu } 1225a7eeb823SChao Yu 1226a7eeb823SChao Yu for (i = start; i < end; i++) 1227a7eeb823SChao Yu __set_bit_le(i, (void *)de->discard_map); 1228a7eeb823SChao Yu 1229a7eeb823SChao Yu SM_I(sbi)->dcc_info->nr_discards += end - start; 1230b2955550SJaegeuk Kim } 123125290fa5SJaegeuk Kim return false; 1232b2955550SJaegeuk Kim } 1233b2955550SJaegeuk Kim 12344b2fecc8SJaegeuk Kim void release_discard_addrs(struct f2fs_sb_info *sbi) 12354b2fecc8SJaegeuk Kim { 123646f84c2cSChao Yu struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list); 12374b2fecc8SJaegeuk Kim struct 
discard_entry *entry, *this; 12384b2fecc8SJaegeuk Kim 12394b2fecc8SJaegeuk Kim /* drop caches */ 12404b2fecc8SJaegeuk Kim list_for_each_entry_safe(entry, this, head, list) { 12414b2fecc8SJaegeuk Kim list_del(&entry->list); 12424b2fecc8SJaegeuk Kim kmem_cache_free(discard_entry_slab, entry); 12434b2fecc8SJaegeuk Kim } 12444b2fecc8SJaegeuk Kim } 12454b2fecc8SJaegeuk Kim 12460a8165d7SJaegeuk Kim /* 1247351df4b2SJaegeuk Kim * Should call clear_prefree_segments after checkpoint is done. 1248351df4b2SJaegeuk Kim */ 1249351df4b2SJaegeuk Kim static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) 1250351df4b2SJaegeuk Kim { 1251351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 1252b65ee148SChao Yu unsigned int segno; 1253351df4b2SJaegeuk Kim 1254351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock); 12557cd8558bSJaegeuk Kim for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi)) 1256351df4b2SJaegeuk Kim __set_test_and_free(sbi, segno); 1257351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock); 1258351df4b2SJaegeuk Kim } 1259351df4b2SJaegeuk Kim 1260836b5a63SJaegeuk Kim void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc) 1261351df4b2SJaegeuk Kim { 126246f84c2cSChao Yu struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list); 12632d7b822aSChao Yu struct discard_entry *entry, *this; 1264351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 126529e59c14SChangman Lee unsigned long *prefree_map = dirty_i->dirty_segmap[PRE]; 126629e59c14SChangman Lee unsigned int start = 0, end = -1; 126736abef4eSJaegeuk Kim unsigned int secno, start_segno; 1268c24a0fd6SChao Yu bool force = (cpc->reason == CP_DISCARD); 1269351df4b2SJaegeuk Kim 1270351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock); 127129e59c14SChangman Lee 1272351df4b2SJaegeuk Kim while (1) { 127329e59c14SChangman Lee int i; 12747cd8558bSJaegeuk Kim start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1); 
12757cd8558bSJaegeuk Kim if (start >= MAIN_SEGS(sbi)) 1276351df4b2SJaegeuk Kim break; 12777cd8558bSJaegeuk Kim end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi), 12787cd8558bSJaegeuk Kim start + 1); 1279351df4b2SJaegeuk Kim 128029e59c14SChangman Lee for (i = start; i < end; i++) 128129e59c14SChangman Lee clear_bit(i, prefree_map); 1282351df4b2SJaegeuk Kim 128329e59c14SChangman Lee dirty_i->nr_dirty[PRE] -= end - start; 128429e59c14SChangman Lee 1285650d3c4eSYunlei He if (!test_opt(sbi, DISCARD)) 1286650d3c4eSYunlei He continue; 1287650d3c4eSYunlei He 1288650d3c4eSYunlei He if (force && start >= cpc->trim_start && 1289650d3c4eSYunlei He (end - 1) <= cpc->trim_end) 129029e59c14SChangman Lee continue; 129129e59c14SChangman Lee 129236abef4eSJaegeuk Kim if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) { 129337208879SJaegeuk Kim f2fs_issue_discard(sbi, START_BLOCK(sbi, start), 129437208879SJaegeuk Kim (end - start) << sbi->log_blocks_per_seg); 129536abef4eSJaegeuk Kim continue; 129636abef4eSJaegeuk Kim } 129736abef4eSJaegeuk Kim next: 12984ddb1a4dSJaegeuk Kim secno = GET_SEC_FROM_SEG(sbi, start); 12994ddb1a4dSJaegeuk Kim start_segno = GET_SEG_FROM_SEC(sbi, secno); 130036abef4eSJaegeuk Kim if (!IS_CURSEC(sbi, secno) && 1301302bd348SJaegeuk Kim !get_valid_blocks(sbi, start, true)) 130236abef4eSJaegeuk Kim f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno), 130336abef4eSJaegeuk Kim sbi->segs_per_sec << sbi->log_blocks_per_seg); 130436abef4eSJaegeuk Kim 130536abef4eSJaegeuk Kim start = start_segno + sbi->segs_per_sec; 130636abef4eSJaegeuk Kim if (start < end) 130736abef4eSJaegeuk Kim goto next; 13088b107f5bSJaegeuk Kim else 13098b107f5bSJaegeuk Kim end = start - 1; 1310351df4b2SJaegeuk Kim } 1311351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock); 1312b2955550SJaegeuk Kim 1313b2955550SJaegeuk Kim /* send small discards */ 13142d7b822aSChao Yu list_for_each_entry_safe(entry, this, head, list) { 1315a7eeb823SChao Yu unsigned int cur_pos = 0, next_pos, len, 
total_len = 0; 1316a7eeb823SChao Yu bool is_valid = test_bit_le(0, entry->discard_map); 1317a7eeb823SChao Yu 1318a7eeb823SChao Yu find_next: 1319a7eeb823SChao Yu if (is_valid) { 1320a7eeb823SChao Yu next_pos = find_next_zero_bit_le(entry->discard_map, 1321a7eeb823SChao Yu sbi->blocks_per_seg, cur_pos); 1322a7eeb823SChao Yu len = next_pos - cur_pos; 1323a7eeb823SChao Yu 1324a7eeb823SChao Yu if (force && len < cpc->trim_minlen) 1325836b5a63SJaegeuk Kim goto skip; 1326a7eeb823SChao Yu 1327a7eeb823SChao Yu f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos, 1328a7eeb823SChao Yu len); 1329a7eeb823SChao Yu cpc->trimmed += len; 1330a7eeb823SChao Yu total_len += len; 1331a7eeb823SChao Yu } else { 1332a7eeb823SChao Yu next_pos = find_next_bit_le(entry->discard_map, 1333a7eeb823SChao Yu sbi->blocks_per_seg, cur_pos); 1334a7eeb823SChao Yu } 1335836b5a63SJaegeuk Kim skip: 1336a7eeb823SChao Yu cur_pos = next_pos; 1337a7eeb823SChao Yu is_valid = !is_valid; 1338a7eeb823SChao Yu 1339a7eeb823SChao Yu if (cur_pos < sbi->blocks_per_seg) 1340a7eeb823SChao Yu goto find_next; 1341a7eeb823SChao Yu 1342b2955550SJaegeuk Kim list_del(&entry->list); 1343a7eeb823SChao Yu SM_I(sbi)->dcc_info->nr_discards -= total_len; 1344b2955550SJaegeuk Kim kmem_cache_free(discard_entry_slab, entry); 1345b2955550SJaegeuk Kim } 134634e159daSChao Yu 134734e159daSChao Yu wake_up(&SM_I(sbi)->dcc_info->discard_wait_queue); 1348351df4b2SJaegeuk Kim } 1349351df4b2SJaegeuk Kim 13508ed59745SJaegeuk Kim static int create_discard_cmd_control(struct f2fs_sb_info *sbi) 13510b54fb84SJaegeuk Kim { 135215469963SJaegeuk Kim dev_t dev = sbi->sb->s_bdev->bd_dev; 13530b54fb84SJaegeuk Kim struct discard_cmd_control *dcc; 1354ba48a33eSChao Yu int err = 0, i; 13550b54fb84SJaegeuk Kim 13560b54fb84SJaegeuk Kim if (SM_I(sbi)->dcc_info) { 13570b54fb84SJaegeuk Kim dcc = SM_I(sbi)->dcc_info; 13580b54fb84SJaegeuk Kim goto init_thread; 13590b54fb84SJaegeuk Kim } 13600b54fb84SJaegeuk Kim 13610b54fb84SJaegeuk Kim dcc = 
kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL); 13620b54fb84SJaegeuk Kim if (!dcc) 13630b54fb84SJaegeuk Kim return -ENOMEM; 13640b54fb84SJaegeuk Kim 136546f84c2cSChao Yu INIT_LIST_HEAD(&dcc->entry_list); 1366ba48a33eSChao Yu for (i = 0; i < MAX_PLIST_NUM; i++) 1367ba48a33eSChao Yu INIT_LIST_HEAD(&dcc->pend_list[i]); 136846f84c2cSChao Yu INIT_LIST_HEAD(&dcc->wait_list); 136915469963SJaegeuk Kim mutex_init(&dcc->cmd_lock); 13708b8dd65fSChao Yu atomic_set(&dcc->issued_discard, 0); 13718b8dd65fSChao Yu atomic_set(&dcc->issing_discard, 0); 13725f32366aSChao Yu atomic_set(&dcc->discard_cmd_cnt, 0); 13730b54fb84SJaegeuk Kim dcc->nr_discards = 0; 1374d618ebafSChao Yu dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg; 1375d84d1cbdSChao Yu dcc->undiscard_blks = 0; 1376004b6862SChao Yu dcc->root = RB_ROOT; 13770b54fb84SJaegeuk Kim 137815469963SJaegeuk Kim init_waitqueue_head(&dcc->discard_wait_queue); 13790b54fb84SJaegeuk Kim SM_I(sbi)->dcc_info = dcc; 13800b54fb84SJaegeuk Kim init_thread: 138115469963SJaegeuk Kim dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi, 138215469963SJaegeuk Kim "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev)); 138315469963SJaegeuk Kim if (IS_ERR(dcc->f2fs_issue_discard)) { 138415469963SJaegeuk Kim err = PTR_ERR(dcc->f2fs_issue_discard); 138515469963SJaegeuk Kim kfree(dcc); 138615469963SJaegeuk Kim SM_I(sbi)->dcc_info = NULL; 138715469963SJaegeuk Kim return err; 138815469963SJaegeuk Kim } 138915469963SJaegeuk Kim 13900b54fb84SJaegeuk Kim return err; 13910b54fb84SJaegeuk Kim } 13920b54fb84SJaegeuk Kim 1393f099405fSChao Yu static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi) 13940b54fb84SJaegeuk Kim { 13950b54fb84SJaegeuk Kim struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; 13960b54fb84SJaegeuk Kim 1397f099405fSChao Yu if (!dcc) 1398f099405fSChao Yu return; 1399f099405fSChao Yu 1400f099405fSChao Yu if (dcc->f2fs_issue_discard) { 140115469963SJaegeuk Kim struct task_struct *discard_thread = 
dcc->f2fs_issue_discard; 140215469963SJaegeuk Kim 140315469963SJaegeuk Kim dcc->f2fs_issue_discard = NULL; 140415469963SJaegeuk Kim kthread_stop(discard_thread); 140515469963SJaegeuk Kim } 1406f099405fSChao Yu 14070b54fb84SJaegeuk Kim kfree(dcc); 14080b54fb84SJaegeuk Kim SM_I(sbi)->dcc_info = NULL; 14090b54fb84SJaegeuk Kim } 14100b54fb84SJaegeuk Kim 1411184a5cd2SChao Yu static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno) 1412351df4b2SJaegeuk Kim { 1413351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 1414184a5cd2SChao Yu 1415184a5cd2SChao Yu if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) { 1416351df4b2SJaegeuk Kim sit_i->dirty_sentries++; 1417184a5cd2SChao Yu return false; 1418184a5cd2SChao Yu } 1419184a5cd2SChao Yu 1420184a5cd2SChao Yu return true; 1421351df4b2SJaegeuk Kim } 1422351df4b2SJaegeuk Kim 1423351df4b2SJaegeuk Kim static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type, 1424351df4b2SJaegeuk Kim unsigned int segno, int modified) 1425351df4b2SJaegeuk Kim { 1426351df4b2SJaegeuk Kim struct seg_entry *se = get_seg_entry(sbi, segno); 1427351df4b2SJaegeuk Kim se->type = type; 1428351df4b2SJaegeuk Kim if (modified) 1429351df4b2SJaegeuk Kim __mark_sit_entry_dirty(sbi, segno); 1430351df4b2SJaegeuk Kim } 1431351df4b2SJaegeuk Kim 1432351df4b2SJaegeuk Kim static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) 1433351df4b2SJaegeuk Kim { 1434351df4b2SJaegeuk Kim struct seg_entry *se; 1435351df4b2SJaegeuk Kim unsigned int segno, offset; 1436351df4b2SJaegeuk Kim long int new_vblocks; 1437351df4b2SJaegeuk Kim 1438351df4b2SJaegeuk Kim segno = GET_SEGNO(sbi, blkaddr); 1439351df4b2SJaegeuk Kim 1440351df4b2SJaegeuk Kim se = get_seg_entry(sbi, segno); 1441351df4b2SJaegeuk Kim new_vblocks = se->valid_blocks + del; 1442491c0854SJaegeuk Kim offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); 1443351df4b2SJaegeuk Kim 14449850cf4aSJaegeuk Kim f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) 
<< 3) || 1445351df4b2SJaegeuk Kim (new_vblocks > sbi->blocks_per_seg))); 1446351df4b2SJaegeuk Kim 1447351df4b2SJaegeuk Kim se->valid_blocks = new_vblocks; 1448351df4b2SJaegeuk Kim se->mtime = get_mtime(sbi); 1449351df4b2SJaegeuk Kim SIT_I(sbi)->max_mtime = se->mtime; 1450351df4b2SJaegeuk Kim 1451351df4b2SJaegeuk Kim /* Update valid block bitmap */ 1452351df4b2SJaegeuk Kim if (del > 0) { 1453355e7891SChao Yu if (f2fs_test_and_set_bit(offset, se->cur_valid_map)) { 1454355e7891SChao Yu #ifdef CONFIG_F2FS_CHECK_FS 1455355e7891SChao Yu if (f2fs_test_and_set_bit(offset, 1456355e7891SChao Yu se->cur_valid_map_mir)) 145705796763SJaegeuk Kim f2fs_bug_on(sbi, 1); 1458355e7891SChao Yu else 1459355e7891SChao Yu WARN_ON(1); 1460355e7891SChao Yu #else 1461355e7891SChao Yu f2fs_bug_on(sbi, 1); 1462355e7891SChao Yu #endif 1463355e7891SChao Yu } 14643e025740SJaegeuk Kim if (f2fs_discard_en(sbi) && 14653e025740SJaegeuk Kim !f2fs_test_and_set_bit(offset, se->discard_map)) 1466a66cdd98SJaegeuk Kim sbi->discard_blks--; 1467720037f9SJaegeuk Kim 1468720037f9SJaegeuk Kim /* don't overwrite by SSR to keep node chain */ 1469720037f9SJaegeuk Kim if (se->type == CURSEG_WARM_NODE) { 1470720037f9SJaegeuk Kim if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) 1471720037f9SJaegeuk Kim se->ckpt_valid_blocks++; 1472720037f9SJaegeuk Kim } 1473351df4b2SJaegeuk Kim } else { 1474355e7891SChao Yu if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) { 1475355e7891SChao Yu #ifdef CONFIG_F2FS_CHECK_FS 1476355e7891SChao Yu if (!f2fs_test_and_clear_bit(offset, 1477355e7891SChao Yu se->cur_valid_map_mir)) 147805796763SJaegeuk Kim f2fs_bug_on(sbi, 1); 1479355e7891SChao Yu else 1480355e7891SChao Yu WARN_ON(1); 1481355e7891SChao Yu #else 1482355e7891SChao Yu f2fs_bug_on(sbi, 1); 1483355e7891SChao Yu #endif 1484355e7891SChao Yu } 14853e025740SJaegeuk Kim if (f2fs_discard_en(sbi) && 14863e025740SJaegeuk Kim f2fs_test_and_clear_bit(offset, se->discard_map)) 1487a66cdd98SJaegeuk Kim sbi->discard_blks++; 
1488351df4b2SJaegeuk Kim } 1489351df4b2SJaegeuk Kim if (!f2fs_test_bit(offset, se->ckpt_valid_map)) 1490351df4b2SJaegeuk Kim se->ckpt_valid_blocks += del; 1491351df4b2SJaegeuk Kim 1492351df4b2SJaegeuk Kim __mark_sit_entry_dirty(sbi, segno); 1493351df4b2SJaegeuk Kim 1494351df4b2SJaegeuk Kim /* update total number of valid blocks to be written in ckpt area */ 1495351df4b2SJaegeuk Kim SIT_I(sbi)->written_valid_blocks += del; 1496351df4b2SJaegeuk Kim 1497351df4b2SJaegeuk Kim if (sbi->segs_per_sec > 1) 1498351df4b2SJaegeuk Kim get_sec_entry(sbi, segno)->valid_blocks += del; 1499351df4b2SJaegeuk Kim } 1500351df4b2SJaegeuk Kim 15015e443818SJaegeuk Kim void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new) 1502351df4b2SJaegeuk Kim { 15035e443818SJaegeuk Kim update_sit_entry(sbi, new, 1); 15045e443818SJaegeuk Kim if (GET_SEGNO(sbi, old) != NULL_SEGNO) 15055e443818SJaegeuk Kim update_sit_entry(sbi, old, -1); 15065e443818SJaegeuk Kim 15075e443818SJaegeuk Kim locate_dirty_segment(sbi, GET_SEGNO(sbi, old)); 15085e443818SJaegeuk Kim locate_dirty_segment(sbi, GET_SEGNO(sbi, new)); 1509351df4b2SJaegeuk Kim } 1510351df4b2SJaegeuk Kim 1511351df4b2SJaegeuk Kim void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) 1512351df4b2SJaegeuk Kim { 1513351df4b2SJaegeuk Kim unsigned int segno = GET_SEGNO(sbi, addr); 1514351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 1515351df4b2SJaegeuk Kim 15169850cf4aSJaegeuk Kim f2fs_bug_on(sbi, addr == NULL_ADDR); 1517351df4b2SJaegeuk Kim if (addr == NEW_ADDR) 1518351df4b2SJaegeuk Kim return; 1519351df4b2SJaegeuk Kim 1520351df4b2SJaegeuk Kim /* add it into sit main buffer */ 1521351df4b2SJaegeuk Kim mutex_lock(&sit_i->sentry_lock); 1522351df4b2SJaegeuk Kim 1523351df4b2SJaegeuk Kim update_sit_entry(sbi, addr, -1); 1524351df4b2SJaegeuk Kim 1525351df4b2SJaegeuk Kim /* add it into dirty seglist */ 1526351df4b2SJaegeuk Kim locate_dirty_segment(sbi, segno); 1527351df4b2SJaegeuk Kim 1528351df4b2SJaegeuk Kim 
mutex_unlock(&sit_i->sentry_lock); 1529351df4b2SJaegeuk Kim } 1530351df4b2SJaegeuk Kim 15316e2c64adSJaegeuk Kim bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr) 15326e2c64adSJaegeuk Kim { 15336e2c64adSJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 15346e2c64adSJaegeuk Kim unsigned int segno, offset; 15356e2c64adSJaegeuk Kim struct seg_entry *se; 15366e2c64adSJaegeuk Kim bool is_cp = false; 15376e2c64adSJaegeuk Kim 15386e2c64adSJaegeuk Kim if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) 15396e2c64adSJaegeuk Kim return true; 15406e2c64adSJaegeuk Kim 15416e2c64adSJaegeuk Kim mutex_lock(&sit_i->sentry_lock); 15426e2c64adSJaegeuk Kim 15436e2c64adSJaegeuk Kim segno = GET_SEGNO(sbi, blkaddr); 15446e2c64adSJaegeuk Kim se = get_seg_entry(sbi, segno); 15456e2c64adSJaegeuk Kim offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); 15466e2c64adSJaegeuk Kim 15476e2c64adSJaegeuk Kim if (f2fs_test_bit(offset, se->ckpt_valid_map)) 15486e2c64adSJaegeuk Kim is_cp = true; 15496e2c64adSJaegeuk Kim 15506e2c64adSJaegeuk Kim mutex_unlock(&sit_i->sentry_lock); 15516e2c64adSJaegeuk Kim 15526e2c64adSJaegeuk Kim return is_cp; 15536e2c64adSJaegeuk Kim } 15546e2c64adSJaegeuk Kim 15550a8165d7SJaegeuk Kim /* 1556351df4b2SJaegeuk Kim * This function should be resided under the curseg_mutex lock 1557351df4b2SJaegeuk Kim */ 1558351df4b2SJaegeuk Kim static void __add_sum_entry(struct f2fs_sb_info *sbi, int type, 1559e79efe3bSHaicheng Li struct f2fs_summary *sum) 1560351df4b2SJaegeuk Kim { 1561351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 1562351df4b2SJaegeuk Kim void *addr = curseg->sum_blk; 1563e79efe3bSHaicheng Li addr += curseg->next_blkoff * sizeof(struct f2fs_summary); 1564351df4b2SJaegeuk Kim memcpy(addr, sum, sizeof(struct f2fs_summary)); 1565351df4b2SJaegeuk Kim } 1566351df4b2SJaegeuk Kim 15670a8165d7SJaegeuk Kim /* 1568351df4b2SJaegeuk Kim * Calculate the number of current summary pages for writing 1569351df4b2SJaegeuk Kim */ 15703fa06d7bSChao Yu int 
npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra) 1571351df4b2SJaegeuk Kim { 1572351df4b2SJaegeuk Kim int valid_sum_count = 0; 15739a47938bSFan Li int i, sum_in_page; 1574351df4b2SJaegeuk Kim 1575351df4b2SJaegeuk Kim for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 1576351df4b2SJaegeuk Kim if (sbi->ckpt->alloc_type[i] == SSR) 1577351df4b2SJaegeuk Kim valid_sum_count += sbi->blocks_per_seg; 15783fa06d7bSChao Yu else { 15793fa06d7bSChao Yu if (for_ra) 15803fa06d7bSChao Yu valid_sum_count += le16_to_cpu( 15813fa06d7bSChao Yu F2FS_CKPT(sbi)->cur_data_blkoff[i]); 1582351df4b2SJaegeuk Kim else 1583351df4b2SJaegeuk Kim valid_sum_count += curseg_blkoff(sbi, i); 1584351df4b2SJaegeuk Kim } 15853fa06d7bSChao Yu } 1586351df4b2SJaegeuk Kim 158709cbfeafSKirill A. Shutemov sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE - 15889a47938bSFan Li SUM_FOOTER_SIZE) / SUMMARY_SIZE; 15899a47938bSFan Li if (valid_sum_count <= sum_in_page) 1590351df4b2SJaegeuk Kim return 1; 15919a47938bSFan Li else if ((valid_sum_count - sum_in_page) <= 159209cbfeafSKirill A. Shutemov (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE) 1593351df4b2SJaegeuk Kim return 2; 1594351df4b2SJaegeuk Kim return 3; 1595351df4b2SJaegeuk Kim } 1596351df4b2SJaegeuk Kim 15970a8165d7SJaegeuk Kim /* 1598351df4b2SJaegeuk Kim * Caller should put this summary page 1599351df4b2SJaegeuk Kim */ 1600351df4b2SJaegeuk Kim struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) 1601351df4b2SJaegeuk Kim { 1602351df4b2SJaegeuk Kim return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno)); 1603351df4b2SJaegeuk Kim } 1604351df4b2SJaegeuk Kim 1605381722d2SChao Yu void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr) 1606381722d2SChao Yu { 1607381722d2SChao Yu struct page *page = grab_meta_page(sbi, blk_addr); 1608381722d2SChao Yu void *dst = page_address(page); 1609381722d2SChao Yu 1610381722d2SChao Yu if (src) 161109cbfeafSKirill A. 
Shutemov memcpy(dst, src, PAGE_SIZE); 1612381722d2SChao Yu else 161309cbfeafSKirill A. Shutemov memset(dst, 0, PAGE_SIZE); 1614381722d2SChao Yu set_page_dirty(page); 1615381722d2SChao Yu f2fs_put_page(page, 1); 1616381722d2SChao Yu } 1617381722d2SChao Yu 1618351df4b2SJaegeuk Kim static void write_sum_page(struct f2fs_sb_info *sbi, 1619351df4b2SJaegeuk Kim struct f2fs_summary_block *sum_blk, block_t blk_addr) 1620351df4b2SJaegeuk Kim { 1621381722d2SChao Yu update_meta_page(sbi, (void *)sum_blk, blk_addr); 1622351df4b2SJaegeuk Kim } 1623351df4b2SJaegeuk Kim 1624b7ad7512SChao Yu static void write_current_sum_page(struct f2fs_sb_info *sbi, 1625b7ad7512SChao Yu int type, block_t blk_addr) 1626b7ad7512SChao Yu { 1627b7ad7512SChao Yu struct curseg_info *curseg = CURSEG_I(sbi, type); 1628b7ad7512SChao Yu struct page *page = grab_meta_page(sbi, blk_addr); 1629b7ad7512SChao Yu struct f2fs_summary_block *src = curseg->sum_blk; 1630b7ad7512SChao Yu struct f2fs_summary_block *dst; 1631b7ad7512SChao Yu 1632b7ad7512SChao Yu dst = (struct f2fs_summary_block *)page_address(page); 1633b7ad7512SChao Yu 1634b7ad7512SChao Yu mutex_lock(&curseg->curseg_mutex); 1635b7ad7512SChao Yu 1636b7ad7512SChao Yu down_read(&curseg->journal_rwsem); 1637b7ad7512SChao Yu memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE); 1638b7ad7512SChao Yu up_read(&curseg->journal_rwsem); 1639b7ad7512SChao Yu 1640b7ad7512SChao Yu memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE); 1641b7ad7512SChao Yu memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE); 1642b7ad7512SChao Yu 1643b7ad7512SChao Yu mutex_unlock(&curseg->curseg_mutex); 1644b7ad7512SChao Yu 1645b7ad7512SChao Yu set_page_dirty(page); 1646b7ad7512SChao Yu f2fs_put_page(page, 1); 1647b7ad7512SChao Yu } 1648b7ad7512SChao Yu 1649a7881893SJaegeuk Kim static int is_next_segment_free(struct f2fs_sb_info *sbi, int type) 1650a7881893SJaegeuk Kim { 1651a7881893SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 1652a7881893SJaegeuk Kim unsigned int 
segno = curseg->segno + 1; 1653a7881893SJaegeuk Kim struct free_segmap_info *free_i = FREE_I(sbi); 1654a7881893SJaegeuk Kim 1655a7881893SJaegeuk Kim if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec) 1656a7881893SJaegeuk Kim return !test_bit(segno, free_i->free_segmap); 1657a7881893SJaegeuk Kim return 0; 1658a7881893SJaegeuk Kim } 1659a7881893SJaegeuk Kim 16600a8165d7SJaegeuk Kim /* 1661351df4b2SJaegeuk Kim * Find a new segment from the free segments bitmap to right order 1662351df4b2SJaegeuk Kim * This function should be returned with success, otherwise BUG 1663351df4b2SJaegeuk Kim */ 1664351df4b2SJaegeuk Kim static void get_new_segment(struct f2fs_sb_info *sbi, 1665351df4b2SJaegeuk Kim unsigned int *newseg, bool new_sec, int dir) 1666351df4b2SJaegeuk Kim { 1667351df4b2SJaegeuk Kim struct free_segmap_info *free_i = FREE_I(sbi); 1668351df4b2SJaegeuk Kim unsigned int segno, secno, zoneno; 16697cd8558bSJaegeuk Kim unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone; 16704ddb1a4dSJaegeuk Kim unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg); 16714ddb1a4dSJaegeuk Kim unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg); 1672351df4b2SJaegeuk Kim unsigned int left_start = hint; 1673351df4b2SJaegeuk Kim bool init = true; 1674351df4b2SJaegeuk Kim int go_left = 0; 1675351df4b2SJaegeuk Kim int i; 1676351df4b2SJaegeuk Kim 16771a118ccfSChao Yu spin_lock(&free_i->segmap_lock); 1678351df4b2SJaegeuk Kim 1679351df4b2SJaegeuk Kim if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) { 1680351df4b2SJaegeuk Kim segno = find_next_zero_bit(free_i->free_segmap, 16814ddb1a4dSJaegeuk Kim GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1); 16824ddb1a4dSJaegeuk Kim if (segno < GET_SEG_FROM_SEC(sbi, hint + 1)) 1683351df4b2SJaegeuk Kim goto got_it; 1684351df4b2SJaegeuk Kim } 1685351df4b2SJaegeuk Kim find_other_zone: 16867cd8558bSJaegeuk Kim secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint); 16877cd8558bSJaegeuk Kim if (secno >= MAIN_SECS(sbi)) { 
1688351df4b2SJaegeuk Kim if (dir == ALLOC_RIGHT) { 1689351df4b2SJaegeuk Kim secno = find_next_zero_bit(free_i->free_secmap, 16907cd8558bSJaegeuk Kim MAIN_SECS(sbi), 0); 16917cd8558bSJaegeuk Kim f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi)); 1692351df4b2SJaegeuk Kim } else { 1693351df4b2SJaegeuk Kim go_left = 1; 1694351df4b2SJaegeuk Kim left_start = hint - 1; 1695351df4b2SJaegeuk Kim } 1696351df4b2SJaegeuk Kim } 1697351df4b2SJaegeuk Kim if (go_left == 0) 1698351df4b2SJaegeuk Kim goto skip_left; 1699351df4b2SJaegeuk Kim 1700351df4b2SJaegeuk Kim while (test_bit(left_start, free_i->free_secmap)) { 1701351df4b2SJaegeuk Kim if (left_start > 0) { 1702351df4b2SJaegeuk Kim left_start--; 1703351df4b2SJaegeuk Kim continue; 1704351df4b2SJaegeuk Kim } 1705351df4b2SJaegeuk Kim left_start = find_next_zero_bit(free_i->free_secmap, 17067cd8558bSJaegeuk Kim MAIN_SECS(sbi), 0); 17077cd8558bSJaegeuk Kim f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi)); 1708351df4b2SJaegeuk Kim break; 1709351df4b2SJaegeuk Kim } 1710351df4b2SJaegeuk Kim secno = left_start; 1711351df4b2SJaegeuk Kim skip_left: 1712351df4b2SJaegeuk Kim hint = secno; 17134ddb1a4dSJaegeuk Kim segno = GET_SEG_FROM_SEC(sbi, secno); 17144ddb1a4dSJaegeuk Kim zoneno = GET_ZONE_FROM_SEC(sbi, secno); 1715351df4b2SJaegeuk Kim 1716351df4b2SJaegeuk Kim /* give up on finding another zone */ 1717351df4b2SJaegeuk Kim if (!init) 1718351df4b2SJaegeuk Kim goto got_it; 1719351df4b2SJaegeuk Kim if (sbi->secs_per_zone == 1) 1720351df4b2SJaegeuk Kim goto got_it; 1721351df4b2SJaegeuk Kim if (zoneno == old_zoneno) 1722351df4b2SJaegeuk Kim goto got_it; 1723351df4b2SJaegeuk Kim if (dir == ALLOC_LEFT) { 1724351df4b2SJaegeuk Kim if (!go_left && zoneno + 1 >= total_zones) 1725351df4b2SJaegeuk Kim goto got_it; 1726351df4b2SJaegeuk Kim if (go_left && zoneno == 0) 1727351df4b2SJaegeuk Kim goto got_it; 1728351df4b2SJaegeuk Kim } 1729351df4b2SJaegeuk Kim for (i = 0; i < NR_CURSEG_TYPE; i++) 1730351df4b2SJaegeuk Kim if (CURSEG_I(sbi, i)->zone == zoneno) 
1731351df4b2SJaegeuk Kim break; 1732351df4b2SJaegeuk Kim 1733351df4b2SJaegeuk Kim if (i < NR_CURSEG_TYPE) { 1734351df4b2SJaegeuk Kim /* zone is in user, try another */ 1735351df4b2SJaegeuk Kim if (go_left) 1736351df4b2SJaegeuk Kim hint = zoneno * sbi->secs_per_zone - 1; 1737351df4b2SJaegeuk Kim else if (zoneno + 1 >= total_zones) 1738351df4b2SJaegeuk Kim hint = 0; 1739351df4b2SJaegeuk Kim else 1740351df4b2SJaegeuk Kim hint = (zoneno + 1) * sbi->secs_per_zone; 1741351df4b2SJaegeuk Kim init = false; 1742351df4b2SJaegeuk Kim goto find_other_zone; 1743351df4b2SJaegeuk Kim } 1744351df4b2SJaegeuk Kim got_it: 1745351df4b2SJaegeuk Kim /* set it as dirty segment in free segmap */ 17469850cf4aSJaegeuk Kim f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap)); 1747351df4b2SJaegeuk Kim __set_inuse(sbi, segno); 1748351df4b2SJaegeuk Kim *newseg = segno; 17491a118ccfSChao Yu spin_unlock(&free_i->segmap_lock); 1750351df4b2SJaegeuk Kim } 1751351df4b2SJaegeuk Kim 1752351df4b2SJaegeuk Kim static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified) 1753351df4b2SJaegeuk Kim { 1754351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 1755351df4b2SJaegeuk Kim struct summary_footer *sum_footer; 1756351df4b2SJaegeuk Kim 1757351df4b2SJaegeuk Kim curseg->segno = curseg->next_segno; 17584ddb1a4dSJaegeuk Kim curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno); 1759351df4b2SJaegeuk Kim curseg->next_blkoff = 0; 1760351df4b2SJaegeuk Kim curseg->next_segno = NULL_SEGNO; 1761351df4b2SJaegeuk Kim 1762351df4b2SJaegeuk Kim sum_footer = &(curseg->sum_blk->footer); 1763351df4b2SJaegeuk Kim memset(sum_footer, 0, sizeof(struct summary_footer)); 1764351df4b2SJaegeuk Kim if (IS_DATASEG(type)) 1765351df4b2SJaegeuk Kim SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA); 1766351df4b2SJaegeuk Kim if (IS_NODESEG(type)) 1767351df4b2SJaegeuk Kim SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE); 1768351df4b2SJaegeuk Kim __set_sit_entry_type(sbi, type, curseg->segno, modified); 1769351df4b2SJaegeuk Kim 
} 1770351df4b2SJaegeuk Kim 17717a20b8a6SJaegeuk Kim static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type) 17727a20b8a6SJaegeuk Kim { 1773a7881893SJaegeuk Kim /* if segs_per_sec is large than 1, we need to keep original policy. */ 1774a7881893SJaegeuk Kim if (sbi->segs_per_sec != 1) 1775a7881893SJaegeuk Kim return CURSEG_I(sbi, type)->segno; 1776a7881893SJaegeuk Kim 17777a20b8a6SJaegeuk Kim if (type == CURSEG_HOT_DATA || IS_NODESEG(type)) 17787a20b8a6SJaegeuk Kim return 0; 17797a20b8a6SJaegeuk Kim 1780e066b83cSJaegeuk Kim if (SIT_I(sbi)->last_victim[ALLOC_NEXT]) 1781e066b83cSJaegeuk Kim return SIT_I(sbi)->last_victim[ALLOC_NEXT]; 17827a20b8a6SJaegeuk Kim return CURSEG_I(sbi, type)->segno; 17837a20b8a6SJaegeuk Kim } 17847a20b8a6SJaegeuk Kim 17850a8165d7SJaegeuk Kim /* 1786351df4b2SJaegeuk Kim * Allocate a current working segment. 1787351df4b2SJaegeuk Kim * This function always allocates a free segment in LFS manner. 1788351df4b2SJaegeuk Kim */ 1789351df4b2SJaegeuk Kim static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec) 1790351df4b2SJaegeuk Kim { 1791351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 1792351df4b2SJaegeuk Kim unsigned int segno = curseg->segno; 1793351df4b2SJaegeuk Kim int dir = ALLOC_LEFT; 1794351df4b2SJaegeuk Kim 1795351df4b2SJaegeuk Kim write_sum_page(sbi, curseg->sum_blk, 179681fb5e87SHaicheng Li GET_SUM_BLOCK(sbi, segno)); 1797351df4b2SJaegeuk Kim if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA) 1798351df4b2SJaegeuk Kim dir = ALLOC_RIGHT; 1799351df4b2SJaegeuk Kim 1800351df4b2SJaegeuk Kim if (test_opt(sbi, NOHEAP)) 1801351df4b2SJaegeuk Kim dir = ALLOC_RIGHT; 1802351df4b2SJaegeuk Kim 18037a20b8a6SJaegeuk Kim segno = __get_next_segno(sbi, type); 1804351df4b2SJaegeuk Kim get_new_segment(sbi, &segno, new_sec, dir); 1805351df4b2SJaegeuk Kim curseg->next_segno = segno; 1806351df4b2SJaegeuk Kim reset_curseg(sbi, type, 1); 1807351df4b2SJaegeuk Kim curseg->alloc_type = LFS; 
1808351df4b2SJaegeuk Kim } 1809351df4b2SJaegeuk Kim 1810351df4b2SJaegeuk Kim static void __next_free_blkoff(struct f2fs_sb_info *sbi, 1811351df4b2SJaegeuk Kim struct curseg_info *seg, block_t start) 1812351df4b2SJaegeuk Kim { 1813351df4b2SJaegeuk Kim struct seg_entry *se = get_seg_entry(sbi, seg->segno); 1814e81c93cfSChangman Lee int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); 181560a3b782SJaegeuk Kim unsigned long *target_map = SIT_I(sbi)->tmp_map; 1816e81c93cfSChangman Lee unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; 1817e81c93cfSChangman Lee unsigned long *cur_map = (unsigned long *)se->cur_valid_map; 1818e81c93cfSChangman Lee int i, pos; 1819e81c93cfSChangman Lee 1820e81c93cfSChangman Lee for (i = 0; i < entries; i++) 1821e81c93cfSChangman Lee target_map[i] = ckpt_map[i] | cur_map[i]; 1822e81c93cfSChangman Lee 1823e81c93cfSChangman Lee pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start); 1824e81c93cfSChangman Lee 1825e81c93cfSChangman Lee seg->next_blkoff = pos; 1826351df4b2SJaegeuk Kim } 1827351df4b2SJaegeuk Kim 18280a8165d7SJaegeuk Kim /* 1829351df4b2SJaegeuk Kim * If a segment is written by LFS manner, next block offset is just obtained 1830351df4b2SJaegeuk Kim * by increasing the current block offset. 
However, if a segment is written by 1831351df4b2SJaegeuk Kim * SSR manner, next block offset obtained by calling __next_free_blkoff 1832351df4b2SJaegeuk Kim */ 1833351df4b2SJaegeuk Kim static void __refresh_next_blkoff(struct f2fs_sb_info *sbi, 1834351df4b2SJaegeuk Kim struct curseg_info *seg) 1835351df4b2SJaegeuk Kim { 1836351df4b2SJaegeuk Kim if (seg->alloc_type == SSR) 1837351df4b2SJaegeuk Kim __next_free_blkoff(sbi, seg, seg->next_blkoff + 1); 1838351df4b2SJaegeuk Kim else 1839351df4b2SJaegeuk Kim seg->next_blkoff++; 1840351df4b2SJaegeuk Kim } 1841351df4b2SJaegeuk Kim 18420a8165d7SJaegeuk Kim /* 1843351df4b2SJaegeuk Kim * This function always allocates a used segment(from dirty seglist) by SSR 1844351df4b2SJaegeuk Kim * manner, so it should recover the existing segment information of valid blocks 1845351df4b2SJaegeuk Kim */ 1846351df4b2SJaegeuk Kim static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse) 1847351df4b2SJaegeuk Kim { 1848351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 1849351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 1850351df4b2SJaegeuk Kim unsigned int new_segno = curseg->next_segno; 1851351df4b2SJaegeuk Kim struct f2fs_summary_block *sum_node; 1852351df4b2SJaegeuk Kim struct page *sum_page; 1853351df4b2SJaegeuk Kim 1854351df4b2SJaegeuk Kim write_sum_page(sbi, curseg->sum_blk, 1855351df4b2SJaegeuk Kim GET_SUM_BLOCK(sbi, curseg->segno)); 1856351df4b2SJaegeuk Kim __set_test_and_inuse(sbi, new_segno); 1857351df4b2SJaegeuk Kim 1858351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock); 1859351df4b2SJaegeuk Kim __remove_dirty_segment(sbi, new_segno, PRE); 1860351df4b2SJaegeuk Kim __remove_dirty_segment(sbi, new_segno, DIRTY); 1861351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock); 1862351df4b2SJaegeuk Kim 1863351df4b2SJaegeuk Kim reset_curseg(sbi, type, 1); 1864351df4b2SJaegeuk Kim curseg->alloc_type = SSR; 1865351df4b2SJaegeuk Kim __next_free_blkoff(sbi, curseg, 0); 
1866351df4b2SJaegeuk Kim 1867351df4b2SJaegeuk Kim if (reuse) { 1868351df4b2SJaegeuk Kim sum_page = get_sum_page(sbi, new_segno); 1869351df4b2SJaegeuk Kim sum_node = (struct f2fs_summary_block *)page_address(sum_page); 1870351df4b2SJaegeuk Kim memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE); 1871351df4b2SJaegeuk Kim f2fs_put_page(sum_page, 1); 1872351df4b2SJaegeuk Kim } 1873351df4b2SJaegeuk Kim } 1874351df4b2SJaegeuk Kim 187543727527SJaegeuk Kim static int get_ssr_segment(struct f2fs_sb_info *sbi, int type) 187643727527SJaegeuk Kim { 187743727527SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 187843727527SJaegeuk Kim const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops; 1879e066b83cSJaegeuk Kim unsigned segno = NULL_SEGNO; 1880d27c3d89SChao Yu int i, cnt; 1881d27c3d89SChao Yu bool reversed = false; 1882c192f7a4SJaegeuk Kim 1883c192f7a4SJaegeuk Kim /* need_SSR() already forces to do this */ 1884e066b83cSJaegeuk Kim if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) { 1885e066b83cSJaegeuk Kim curseg->next_segno = segno; 1886c192f7a4SJaegeuk Kim return 1; 1887e066b83cSJaegeuk Kim } 188843727527SJaegeuk Kim 188970d625cbSJaegeuk Kim /* For node segments, let's do SSR more intensively */ 189070d625cbSJaegeuk Kim if (IS_NODESEG(type)) { 1891d27c3d89SChao Yu if (type >= CURSEG_WARM_NODE) { 1892d27c3d89SChao Yu reversed = true; 1893d27c3d89SChao Yu i = CURSEG_COLD_NODE; 1894d27c3d89SChao Yu } else { 189570d625cbSJaegeuk Kim i = CURSEG_HOT_NODE; 1896d27c3d89SChao Yu } 1897d27c3d89SChao Yu cnt = NR_CURSEG_NODE_TYPE; 1898d27c3d89SChao Yu } else { 1899d27c3d89SChao Yu if (type >= CURSEG_WARM_DATA) { 1900d27c3d89SChao Yu reversed = true; 1901d27c3d89SChao Yu i = CURSEG_COLD_DATA; 190270d625cbSJaegeuk Kim } else { 190370d625cbSJaegeuk Kim i = CURSEG_HOT_DATA; 1904d27c3d89SChao Yu } 1905d27c3d89SChao Yu cnt = NR_CURSEG_DATA_TYPE; 190670d625cbSJaegeuk Kim } 190743727527SJaegeuk Kim 1908d27c3d89SChao Yu for (; cnt-- > 0; reversed ? 
i-- : i++) { 1909c192f7a4SJaegeuk Kim if (i == type) 1910c192f7a4SJaegeuk Kim continue; 1911e066b83cSJaegeuk Kim if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) { 1912e066b83cSJaegeuk Kim curseg->next_segno = segno; 191343727527SJaegeuk Kim return 1; 1914c192f7a4SJaegeuk Kim } 1915e066b83cSJaegeuk Kim } 191643727527SJaegeuk Kim return 0; 191743727527SJaegeuk Kim } 191843727527SJaegeuk Kim 1919351df4b2SJaegeuk Kim /* 1920351df4b2SJaegeuk Kim * flush out current segment and replace it with new segment 1921351df4b2SJaegeuk Kim * This function should be returned with success, otherwise BUG 1922351df4b2SJaegeuk Kim */ 1923351df4b2SJaegeuk Kim static void allocate_segment_by_default(struct f2fs_sb_info *sbi, 1924351df4b2SJaegeuk Kim int type, bool force) 1925351df4b2SJaegeuk Kim { 1926a7881893SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 1927a7881893SJaegeuk Kim 19287b405275SGu Zheng if (force) 1929351df4b2SJaegeuk Kim new_curseg(sbi, type, true); 19305b6c6be2SJaegeuk Kim else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) && 19315b6c6be2SJaegeuk Kim type == CURSEG_WARM_NODE) 1932351df4b2SJaegeuk Kim new_curseg(sbi, type, false); 1933a7881893SJaegeuk Kim else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type)) 1934a7881893SJaegeuk Kim new_curseg(sbi, type, false); 1935351df4b2SJaegeuk Kim else if (need_SSR(sbi) && get_ssr_segment(sbi, type)) 1936351df4b2SJaegeuk Kim change_curseg(sbi, type, true); 1937351df4b2SJaegeuk Kim else 1938351df4b2SJaegeuk Kim new_curseg(sbi, type, false); 1939dcdfff65SJaegeuk Kim 1940a7881893SJaegeuk Kim stat_inc_seg_type(sbi, curseg); 1941351df4b2SJaegeuk Kim } 1942351df4b2SJaegeuk Kim 1943351df4b2SJaegeuk Kim void allocate_new_segments(struct f2fs_sb_info *sbi) 1944351df4b2SJaegeuk Kim { 19456ae1be13SJaegeuk Kim struct curseg_info *curseg; 19466ae1be13SJaegeuk Kim unsigned int old_segno; 1947351df4b2SJaegeuk Kim int i; 1948351df4b2SJaegeuk Kim 19496ae1be13SJaegeuk Kim for (i = CURSEG_HOT_DATA; i <= 
CURSEG_COLD_DATA; i++) { 19506ae1be13SJaegeuk Kim curseg = CURSEG_I(sbi, i); 19516ae1be13SJaegeuk Kim old_segno = curseg->segno; 19526ae1be13SJaegeuk Kim SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true); 19536ae1be13SJaegeuk Kim locate_dirty_segment(sbi, old_segno); 19546ae1be13SJaegeuk Kim } 1955351df4b2SJaegeuk Kim } 1956351df4b2SJaegeuk Kim 1957351df4b2SJaegeuk Kim static const struct segment_allocation default_salloc_ops = { 1958351df4b2SJaegeuk Kim .allocate_segment = allocate_segment_by_default, 1959351df4b2SJaegeuk Kim }; 1960351df4b2SJaegeuk Kim 196125290fa5SJaegeuk Kim bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc) 196225290fa5SJaegeuk Kim { 196325290fa5SJaegeuk Kim __u64 trim_start = cpc->trim_start; 196425290fa5SJaegeuk Kim bool has_candidate = false; 196525290fa5SJaegeuk Kim 196625290fa5SJaegeuk Kim mutex_lock(&SIT_I(sbi)->sentry_lock); 196725290fa5SJaegeuk Kim for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) { 196825290fa5SJaegeuk Kim if (add_discard_addrs(sbi, cpc, true)) { 196925290fa5SJaegeuk Kim has_candidate = true; 197025290fa5SJaegeuk Kim break; 197125290fa5SJaegeuk Kim } 197225290fa5SJaegeuk Kim } 197325290fa5SJaegeuk Kim mutex_unlock(&SIT_I(sbi)->sentry_lock); 197425290fa5SJaegeuk Kim 197525290fa5SJaegeuk Kim cpc->trim_start = trim_start; 197625290fa5SJaegeuk Kim return has_candidate; 197725290fa5SJaegeuk Kim } 197825290fa5SJaegeuk Kim 19794b2fecc8SJaegeuk Kim int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) 19804b2fecc8SJaegeuk Kim { 1981f7ef9b83SJaegeuk Kim __u64 start = F2FS_BYTES_TO_BLK(range->start); 1982f7ef9b83SJaegeuk Kim __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1; 19834b2fecc8SJaegeuk Kim unsigned int start_segno, end_segno; 19844b2fecc8SJaegeuk Kim struct cp_control cpc; 1985c34f42e2SChao Yu int err = 0; 19864b2fecc8SJaegeuk Kim 1987836b5a63SJaegeuk Kim if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize) 19884b2fecc8SJaegeuk Kim return -EINVAL; 
19894b2fecc8SJaegeuk Kim 19909bd27ae4SJan Kara cpc.trimmed = 0; 19917cd8558bSJaegeuk Kim if (end <= MAIN_BLKADDR(sbi)) 19924b2fecc8SJaegeuk Kim goto out; 19934b2fecc8SJaegeuk Kim 1994ed214a11SYunlei He if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { 1995ed214a11SYunlei He f2fs_msg(sbi->sb, KERN_WARNING, 1996ed214a11SYunlei He "Found FS corruption, run fsck to fix."); 1997ed214a11SYunlei He goto out; 1998ed214a11SYunlei He } 1999ed214a11SYunlei He 20004b2fecc8SJaegeuk Kim /* start/end segment number in main_area */ 20017cd8558bSJaegeuk Kim start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start); 20027cd8558bSJaegeuk Kim end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 : 20037cd8558bSJaegeuk Kim GET_SEGNO(sbi, end); 20044b2fecc8SJaegeuk Kim cpc.reason = CP_DISCARD; 2005836b5a63SJaegeuk Kim cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen)); 20064b2fecc8SJaegeuk Kim 20074b2fecc8SJaegeuk Kim /* do checkpoint to issue discard commands safely */ 2008bba681cbSJaegeuk Kim for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) { 2009bba681cbSJaegeuk Kim cpc.trim_start = start_segno; 2010a66cdd98SJaegeuk Kim 2011a66cdd98SJaegeuk Kim if (sbi->discard_blks == 0) 2012a66cdd98SJaegeuk Kim break; 2013a66cdd98SJaegeuk Kim else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi)) 2014a66cdd98SJaegeuk Kim cpc.trim_end = end_segno; 2015a66cdd98SJaegeuk Kim else 2016a66cdd98SJaegeuk Kim cpc.trim_end = min_t(unsigned int, 2017a66cdd98SJaegeuk Kim rounddown(start_segno + 2018bba681cbSJaegeuk Kim BATCHED_TRIM_SEGMENTS(sbi), 2019bba681cbSJaegeuk Kim sbi->segs_per_sec) - 1, end_segno); 2020bba681cbSJaegeuk Kim 2021ca4b02eeSJaegeuk Kim mutex_lock(&sbi->gc_mutex); 2022c34f42e2SChao Yu err = write_checkpoint(sbi, &cpc); 2023ca4b02eeSJaegeuk Kim mutex_unlock(&sbi->gc_mutex); 2024e9328353SChao Yu if (err) 2025e9328353SChao Yu break; 202674fa5f3dSChao Yu 202774fa5f3dSChao Yu schedule(); 2028bba681cbSJaegeuk Kim } 20294b2fecc8SJaegeuk Kim out: 
2030f7ef9b83SJaegeuk Kim range->len = F2FS_BLK_TO_BYTES(cpc.trimmed); 2031c34f42e2SChao Yu return err; 20324b2fecc8SJaegeuk Kim } 20334b2fecc8SJaegeuk Kim 2034351df4b2SJaegeuk Kim static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type) 2035351df4b2SJaegeuk Kim { 2036351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 2037351df4b2SJaegeuk Kim if (curseg->next_blkoff < sbi->blocks_per_seg) 2038351df4b2SJaegeuk Kim return true; 2039351df4b2SJaegeuk Kim return false; 2040351df4b2SJaegeuk Kim } 2041351df4b2SJaegeuk Kim 2042351df4b2SJaegeuk Kim static int __get_segment_type_2(struct page *page, enum page_type p_type) 2043351df4b2SJaegeuk Kim { 2044351df4b2SJaegeuk Kim if (p_type == DATA) 2045351df4b2SJaegeuk Kim return CURSEG_HOT_DATA; 2046351df4b2SJaegeuk Kim else 2047351df4b2SJaegeuk Kim return CURSEG_HOT_NODE; 2048351df4b2SJaegeuk Kim } 2049351df4b2SJaegeuk Kim 2050351df4b2SJaegeuk Kim static int __get_segment_type_4(struct page *page, enum page_type p_type) 2051351df4b2SJaegeuk Kim { 2052351df4b2SJaegeuk Kim if (p_type == DATA) { 2053351df4b2SJaegeuk Kim struct inode *inode = page->mapping->host; 2054351df4b2SJaegeuk Kim 2055351df4b2SJaegeuk Kim if (S_ISDIR(inode->i_mode)) 2056351df4b2SJaegeuk Kim return CURSEG_HOT_DATA; 2057351df4b2SJaegeuk Kim else 2058351df4b2SJaegeuk Kim return CURSEG_COLD_DATA; 2059351df4b2SJaegeuk Kim } else { 2060a344b9fdSJaegeuk Kim if (IS_DNODE(page) && is_cold_node(page)) 2061a344b9fdSJaegeuk Kim return CURSEG_WARM_NODE; 2062351df4b2SJaegeuk Kim else 2063351df4b2SJaegeuk Kim return CURSEG_COLD_NODE; 2064351df4b2SJaegeuk Kim } 2065351df4b2SJaegeuk Kim } 2066351df4b2SJaegeuk Kim 2067351df4b2SJaegeuk Kim static int __get_segment_type_6(struct page *page, enum page_type p_type) 2068351df4b2SJaegeuk Kim { 2069351df4b2SJaegeuk Kim if (p_type == DATA) { 2070351df4b2SJaegeuk Kim struct inode *inode = page->mapping->host; 2071351df4b2SJaegeuk Kim 2072ef095d19SJaegeuk Kim if (is_cold_data(page) || file_is_cold(inode)) 
2073351df4b2SJaegeuk Kim return CURSEG_COLD_DATA; 2074ef095d19SJaegeuk Kim if (is_inode_flag_set(inode, FI_HOT_DATA)) 2075ef095d19SJaegeuk Kim return CURSEG_HOT_DATA; 2076351df4b2SJaegeuk Kim return CURSEG_WARM_DATA; 2077351df4b2SJaegeuk Kim } else { 2078351df4b2SJaegeuk Kim if (IS_DNODE(page)) 2079351df4b2SJaegeuk Kim return is_cold_node(page) ? CURSEG_WARM_NODE : 2080351df4b2SJaegeuk Kim CURSEG_HOT_NODE; 2081351df4b2SJaegeuk Kim return CURSEG_COLD_NODE; 2082351df4b2SJaegeuk Kim } 2083351df4b2SJaegeuk Kim } 2084351df4b2SJaegeuk Kim 2085351df4b2SJaegeuk Kim static int __get_segment_type(struct page *page, enum page_type p_type) 2086351df4b2SJaegeuk Kim { 20874081363fSJaegeuk Kim switch (F2FS_P_SB(page)->active_logs) { 2088351df4b2SJaegeuk Kim case 2: 2089351df4b2SJaegeuk Kim return __get_segment_type_2(page, p_type); 2090351df4b2SJaegeuk Kim case 4: 2091351df4b2SJaegeuk Kim return __get_segment_type_4(page, p_type); 2092351df4b2SJaegeuk Kim } 209312a67146SJaegeuk Kim /* NR_CURSEG_TYPE(6) logs by default */ 20949850cf4aSJaegeuk Kim f2fs_bug_on(F2FS_P_SB(page), 20959850cf4aSJaegeuk Kim F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE); 209612a67146SJaegeuk Kim return __get_segment_type_6(page, p_type); 2097351df4b2SJaegeuk Kim } 2098351df4b2SJaegeuk Kim 2099bfad7c2dSJaegeuk Kim void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, 2100351df4b2SJaegeuk Kim block_t old_blkaddr, block_t *new_blkaddr, 2101bfad7c2dSJaegeuk Kim struct f2fs_summary *sum, int type) 2102351df4b2SJaegeuk Kim { 2103351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 21046ae1be13SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, type); 2105351df4b2SJaegeuk Kim 2106351df4b2SJaegeuk Kim mutex_lock(&curseg->curseg_mutex); 210721cb1d99SJaegeuk Kim mutex_lock(&sit_i->sentry_lock); 2108351df4b2SJaegeuk Kim 2109351df4b2SJaegeuk Kim *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); 2110351df4b2SJaegeuk Kim 21114e6a8d9bSJaegeuk Kim f2fs_wait_discard_bio(sbi, *new_blkaddr); 

	/*
	 * __add_sum_entry should be resided under the curseg_mutex
	 * because, this function updates a summary entry in the
	 * current summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	__refresh_next_blkoff(sbi, curseg);

	stat_inc_block_count(sbi, curseg);

	/* move to a fresh segment once the current one is exhausted */
	if (!__has_curseg_space(sbi, type))
		sit_i->s_ops->allocate_segment(sbi, type, false);
	/*
	 * SIT information should be updated after segment allocation,
	 * since we need to keep dirty segments precisely under SSR.
	 */
	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

	mutex_unlock(&sit_i->sentry_lock);

	/* record the next free address in the node footer for node pages */
	if (page && IS_NODESEG(type))
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

	mutex_unlock(&curseg->curseg_mutex);
}

/*
 * Allocate a block for fio->page and submit the write, retrying the
 * allocation when submission returns -EAGAIN.  Continues in the next chunk.
 */
static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
	int type = __get_segment_type(fio->page, fio->type);
	int err;

	/* serialize NODE/DATA writes through the per-type wio_mutex */
	if (fio->type == NODE || fio->type == DATA)
		mutex_lock(&fio->sbi->wio_mutex[fio->type]);
reallocate:
	allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
			&fio->new_blkaddr, sum, type);

	/* writeout dirty page into bdev */
	err = f2fs_submit_page_mbio(fio);
	if (err == -EAGAIN) {
		/* submission rejected the address: re-allocate and retry */
		fio->old_blkaddr = fio->new_blkaddr;
		goto reallocate;
	}

	if (fio->type == NODE || fio->type == DATA)
		mutex_unlock(&fio->sbi->wio_mutex[fio->type]);
}

/*
 * Write one META page in place (old and new block address are both the
 * page index), marking it under writeback before submission.
 */
void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
		.old_blkaddr = page->index,
		.new_blkaddr = page->index,
		.page = page,
		.encrypted_page = NULL,
	};

	/* pages beyond the main area are not treated as metadata I/O */
	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
		fio.op_flags &= ~REQ_META;

	set_page_writeback(page);
	f2fs_submit_page_mbio(&fio);
}

/*
 * Write a node page: build a summary for @nid and hand off to
 * do_write_page() for allocation and submission.
 */
void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
{
	struct f2fs_summary sum;

	set_summary(&sum, nid, 0, 0);
	do_write_page(&sum, fio);
}

/*
 * Write a data page described by @dn — continues in the next chunk.
 */
void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct
f2fs_summary sum;
	struct node_info ni;

	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	do_write_page(&sum, fio);
	/* propagate the newly allocated address back into the dnode */
	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
}

/*
 * In-place rewrite: reuse the old block address and submit the page
 * directly, accounting it as an in-place-update block.
 */
int rewrite_data_page(struct f2fs_io_info *fio)
{
	fio->new_blkaddr = fio->old_blkaddr;
	stat_inc_inplace_blocks(fio->sbi);
	return f2fs_submit_page_bio(fio);
}

/*
 * Move a block from @old_blkaddr to @new_blkaddr by steering the current
 * segment onto new_blkaddr's segment, recording @sum there, and updating
 * SIT entries and dirty-segment state for both addresses.
 *
 * @recover_curseg: restore the original current-segment position afterwards.
 * @recover_newaddr: also account new_blkaddr as valid when recovering.
 *
 * Lock order: curseg_mutex, then sit_i->sentry_lock.
 */
void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
				block_t old_blkaddr, block_t new_blkaddr,
				bool recover_curseg, bool recover_newaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	struct seg_entry *se;
	int type;
	unsigned short old_blkoff;

	segno = GET_SEGNO(sbi, new_blkaddr);
	se = get_seg_entry(sbi, segno);
	type = se->type;

	if (!recover_curseg) {
		/* for recovery flow */
		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
			if (old_blkaddr == NULL_ADDR)
				type = CURSEG_COLD_DATA;
			else
				type = CURSEG_WARM_DATA;
		}
	} else {
		if (!IS_CURSEG(sbi, segno))
			type = CURSEG_WARM_DATA;
	}

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	/* remember where the current segment was, so it can be restored */
	old_cursegno = curseg->segno;
	old_blkoff = curseg->next_blkoff;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}

	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
	__add_sum_entry(sbi, type, sum);

	if (!recover_curseg || recover_newaddr)
		update_sit_entry(sbi, new_blkaddr, 1);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		update_sit_entry(sbi, old_blkaddr, -1);

	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));

	locate_dirty_segment(sbi, old_cursegno);

	if (recover_curseg) {
		if (old_cursegno != curseg->segno) {
			curseg->next_segno = old_cursegno;
			change_curseg(sbi, type, true);
		}
		curseg->next_blkoff = old_blkoff;
	}

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

/*
 * Convenience wrapper around __f2fs_replace_block(): builds the summary
 * from @dn and @version, performs the replacement, then updates the
 * dnode's data block address to @new_addr.
 */
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
				block_t old_addr, block_t new_addr,
				unsigned char version, bool recover_curseg,
				bool recover_newaddr)
{
	struct f2fs_summary sum;

	set_summary(&sum, dn->nid, dn->ofs_in_node, version);

	__f2fs_replace_block(sbi, &sum, old_addr, new_addr,
					recover_curseg, recover_newaddr);

	f2fs_update_data_blkaddr(dn, new_addr);
}

/*
 * Wait until @page is no longer under writeback, first flushing any merged
 * bio that may contain it.  @ordered selects a strict writeback wait versus
 * wait_for_stable_page().
 */
void f2fs_wait_on_page_writeback(struct page *page,
				enum page_type type, bool ordered)
{
	if (PageWriteback(page)) {
		struct f2fs_sb_info *sbi = F2FS_P_SB(page);

		f2fs_submit_merged_bio_cond(sbi, page->mapping->host,
						0, page->index, type, WRITE);
		if (ordered)
			wait_on_page_writeback(page);
		else
			wait_for_stable_page(page);
	}
}

/*
 * If an encrypted page for @blkaddr is cached in the meta mapping, wait for
 * its writeback to finish.  NEW_ADDR/NULL_ADDR are not real block addresses
 * and are skipped.
 */
void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
							block_t blkaddr)
{
	struct page *cpage;

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
		return;

	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
	if (cpage) {
		f2fs_wait_on_page_writeback(cpage, DATA, true);
		f2fs_put_page(cpage, 1);
	}
}

/*
 * Restore the three data cursegs from the compacted summary block layout:
 * NAT journal, then SIT journal, then per-curseg summary entries packed
 * across consecutive meta pages.  Continues in the next chunk.
 */
static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *seg_i;
	unsigned char *kaddr;
	struct page *page;
	block_t start;
	int i, j, offset;

	start = start_sum_block(sbi);

	page = get_meta_page(sbi, start++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: restore nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);

	/* Step 2: restore sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
	offset = 2 * SUM_JOURNAL_SIZE;

	/* Step 3: restore summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		unsigned int segno;

		seg_i = CURSEG_I(sbi, i);
		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
		seg_i->next_segno = segno;
		reset_curseg(sbi, i, 0);
		seg_i->alloc_type = ckpt->alloc_type[i];
		seg_i->next_blkoff = blk_off;

		if
 (seg_i->alloc_type == SSR)
			/* SSR segments carry a full segment's worth of entries */
			blk_off = sbi->blocks_per_seg;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;
			s = (struct f2fs_summary *)(kaddr + offset);
			seg_i->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
						SUM_FOOTER_SIZE)
				continue;

			/* page exhausted: advance to the next summary page */
			f2fs_put_page(page, 1);
			page = NULL;

			page = get_meta_page(sbi, start++);
			kaddr = (unsigned char *)page_address(page);
			offset = 0;
		}
	}
	f2fs_put_page(page, 1);
	return 0;
}

/*
 * Restore one curseg of @type from its normal (non-compacted) summary
 * block: locate the block from the checkpoint, rebuild node summaries if
 * they were not written, then copy journal, entries and footer into the
 * in-memory curseg.
 */
static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum;
	struct curseg_info *curseg;
	struct page *new;
	unsigned short blk_off;
	unsigned int segno = 0;
	block_t blk_addr = 0;

	/* get segment number and block addr */
	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
							CURSEG_HOT_DATA]);
		if (__exist_node_summaries(sbi))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
							CURSEG_HOT_NODE]);
		if (__exist_node_summaries(sbi))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLOCK(sbi, segno);
	}

	new = get_meta_page(sbi, blk_addr);
	sum = (struct f2fs_summary_block *)page_address(new);

	if (IS_NODESEG(type)) {
		if (__exist_node_summaries(sbi)) {
			/* node summaries on disk: only clear per-entry state */
			struct f2fs_summary *ns = &sum->entries[0];
			int i;
			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
				ns->version = 0;
				ns->ofs_in_node = 0;
			}
		} else {
			int err;

			err = restore_node_summary(sbi, segno, sum);
			if (err) {
				f2fs_put_page(new, 1);
				return err;
			}
		}
	}

	/* set uncompleted segment to curseg */
	curseg = CURSEG_I(sbi, type);
	mutex_lock(&curseg->curseg_mutex);

	/* update journal info */
	down_write(&curseg->journal_rwsem);
	memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
	up_write(&curseg->journal_rwsem);

	memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
	memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 0);
	curseg->alloc_type = ckpt->alloc_type[type];
	curseg->next_blkoff = blk_off;
	mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(new, 1);
	return 0;
}

/*
 * Restore all cursegs at mount: compacted data summaries first if the
 * checkpoint used them, then normal summaries for the remaining types.
 * Continues in the next chunk.
 */
static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;
	int err;

	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
		int npages = npages_for_summary_flush(sbi, true);

		/* readahead the summary pages before parsing them */
		if (npages >= 2)
			ra_meta_pages(sbi, start_sum_block(sbi), npages,
							META_CP, true);

		/* restore for compacted data summary */
		if (read_compacted_summaries(sbi))
			return -EINVAL;
		type = CURSEG_HOT_NODE;
	}

	if (__exist_node_summaries(sbi))
		ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
					NR_CURSEG_TYPE - type, META_CP, true);

	for (; type <= CURSEG_COLD_NODE; type++) {
		err = read_normal_summaries(sbi, type);
		if (err)
			return err;
	}

	return 0;
}

/*
 * Write the three data cursegs in compacted form starting at @blkaddr:
 * NAT journal, SIT journal, then summary entries packed across pages.
 * Mirrors the layout read back by the compacted-summary restore path.
 */
static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blkoff;
		seg_i = CURSEG_I(sbi, i);
		if (sbi->ckpt->alloc_type[i] == SSR)
			blkoff = sbi->blocks_per_seg;
		else
			blkoff = curseg_blkoff(sbi, i);

		for (j = 0; j < blkoff; j++) {
			if (!page) {
				/* previous page filled up: start a new one */
				page = grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;

			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
							SUM_FOOTER_SIZE)
				continue;

			set_page_dirty(page);
			f2fs_put_page(page, 1);
			page = NULL;
		}
	}
	/* flush the final, partially filled page */
	if (page) {
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

/*
 * Write one full summary page per curseg of the data or node group that
 * @type belongs to, at consecutive block addresses from @blkaddr.
 */
static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;
	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

	for (i = type; i < end; i++)
		write_current_sum_page(sbi, i, blkaddr + (i - type));
}

/*
 * Checkpoint hook: write data summaries, compacted or normal depending on
 * the CP_COMPACT_SUM_FLAG checkpoint flag.
 */
void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}

/* Checkpoint hook: node summaries are always written in normal form. */
void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}

/*
 * Find the journal slot whose key equals @val in the NAT or SIT journal of
 * @journal.  Returns the slot index, a freshly allocated slot when @alloc
 * is set and space remains, or -1.
 */
int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
					unsigned int val, int alloc)
{
	int i;

	if (type == NAT_JOURNAL) {
		for (i = 0; i < nats_in_cursum(journal); i++) {
			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
				return i;
		}
		if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
			return update_nats_in_cursum(journal, 1);
	} else if (type == SIT_JOURNAL) {
		for (i = 0; i < sits_in_cursum(journal); i++)
			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
				return i;
		if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
			return update_sits_in_cursum(journal, 1);
	}
	return -1;
}

/* Read the SIT block currently holding @segno's entry. */
static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	return get_meta_page(sbi, current_sit_addr(sbi, segno));
}

/*
 * Copy @start's SIT block to its alternate location and flip the
 * pointer so future lookups use the new copy — continues in the next chunk.
 */
static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
					unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct page *src_page, *dst_page;
	pgoff_t src_off, dst_off;
	void *src_addr, *dst_addr;

	src_off =
current_sit_addr(sbi, start);
	dst_off = next_sit_addr(sbi, src_off);

	/* get current sit block page without lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);

	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	/* switch the SIT bitmap so @start now resolves to the copy */
	set_to_next_sit(sit_i, start);

	return dst_page;
}

/*
 * Allocate an empty sit_entry_set from the slab; entry_cnt starts at zero
 * and the list head is initialized but not linked anywhere yet.
 */
static struct sit_entry_set *grab_sit_entry_set(void)
{
	struct sit_entry_set *ses =
			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);

	ses->entry_cnt = 0;
	INIT_LIST_HEAD(&ses->set_list);
	return ses;
}

/* Unlink @ses from its list and return it to the slab. */
static void release_sit_entry_set(struct sit_entry_set *ses)
{
	list_del(&ses->set_list);
	kmem_cache_free(sit_entry_set_slab, ses);
}

/*
 * Re-position @ses within @head so the list stays ordered by ascending
 * entry_cnt after @ses's count increased.
 */
static void adjust_sit_entry_set(struct sit_entry_set *ses,
						struct list_head *head)
{
	struct sit_entry_set *next = ses;

	if (list_is_last(&ses->set_list, head))
		return;

	list_for_each_entry_continue(next, head, set_list)
		if (ses->entry_cnt <= next->entry_cnt)
			break;

	list_move_tail(&ses->set_list, &next->set_list);
}

/*
 * Account one dirty SIT entry for @segno: bump the count of the existing
 * set covering its SIT block, or create a new set at the list head.
 */
static void add_sit_entry(unsigned int segno, struct list_head *head)
{
	struct sit_entry_set *ses;
	unsigned int start_segno = START_SEGNO(segno);

	list_for_each_entry(ses, head, set_list) {
		if (ses->start_segno == start_segno) {
			ses->entry_cnt++;
			adjust_sit_entry_set(ses, head);
			return;
		}
	}

	ses = grab_sit_entry_set();

	ses->start_segno = start_segno;
	ses->entry_cnt++;
	list_add(&ses->set_list, head);
}

/*
 * Walk the dirty-sentries bitmap and account every dirty segment in the
 * per-SIT-block entry sets.
 */
static void add_sits_in_set(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	struct list_head *set_list = &sm_info->sit_entry_set;
	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
	unsigned int segno;

	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
		add_sit_entry(segno, set_list);
}

/*
 * Drain the SIT journal, marking every journaled segment dirty (and
 * accounting it in the entry sets if it was not already dirty).
 * Continues in the next chunk.
 */
static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < sits_in_cursum(journal); i++) {
		unsigned int segno;
		bool dirtied;

		segno = le32_to_cpu(segno_in_journal(journal, i));
		dirtied = __mark_sit_entry_dirty(sbi, segno);

		if (!dirtied)
			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
	}
	/* journal is now empty: drop all i consumed slots */
	update_sits_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}

/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct sit_entry_set *ses, *tmp;
	struct list_head *head = &SM_I(sbi)->sit_entry_set;
	bool to_journal = true;
	struct seg_entry *se;

	mutex_lock(&sit_i->sentry_lock);

	if (!sit_i->dirty_sentries)
		goto out;

	/*
	 * add and account sit entries of dirty bitmap in sit entry
	 * set temporarily
	 */
	add_sits_in_set(sbi);

	/*
	 * if there are no enough space in journal to store dirty sit
	 * entries, remove all entries from journal and add and account
	 * them in sit entry set.
	 */
	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
		remove_sits_in_journal(sbi);

	/*
	 * there are two steps to flush sit entries:
	 * #1, flush sit entries to journal in current cold data summary block.
	 * #2, flush sit entries to sit page.
	 */
	list_for_each_entry_safe(ses, tmp, head, set_list) {
		struct page *page = NULL;
		struct f2fs_sit_block *raw_sit = NULL;
		unsigned int start_segno = ses->start_segno;
		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
						(unsigned long)MAIN_SEGS(sbi));
		unsigned int segno = start_segno;

		/* once the journal overflows, all later sets go to SIT pages */
		if (to_journal &&
			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
			to_journal = false;

		if (to_journal) {
			down_write(&curseg->journal_rwsem);
		} else {
			page = get_next_sit_page(sbi, start_segno);
			raw_sit = page_address(page);
		}

		/* flush dirty sit entries in region of current sit set */
		for_each_set_bit_from(segno, bitmap, end) {
			int offset, sit_offset;

			se = get_seg_entry(sbi, segno);

			/* add discard candidates */
			if (cpc->reason != CP_DISCARD) {
				cpc->trim_start = segno;
				add_discard_addrs(sbi, cpc, false);
			}

			if (to_journal) {
				offset = lookup_journal_in_cursum(journal,
							SIT_JOURNAL, segno, 1);
				f2fs_bug_on(sbi, offset < 0);
				segno_in_journal(journal, offset) =
							cpu_to_le32(segno);
				seg_info_to_raw_sit(se,
					&sit_in_journal(journal, offset));
			} else {
				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
				seg_info_to_raw_sit(se,
						&raw_sit->entries[sit_offset]);
			}

			__clear_bit(segno, bitmap);
			sit_i->dirty_sentries--;
			ses->entry_cnt--;
		}

		if (to_journal)
			up_write(&curseg->journal_rwsem);
		else
			f2fs_put_page(page, 1);

		/* every entry of this set must have been flushed above */
		f2fs_bug_on(sbi, ses->entry_cnt);
		release_sit_entry_set(ses);
	}

	f2fs_bug_on(sbi, !list_empty(head));
	f2fs_bug_on(sbi, sit_i->dirty_sentries);
out:
	if (cpc->reason == CP_DISCARD) {
		/* preserve caller's trim_start across the discard scan */
		__u64 trim_start = cpc->trim_start;

		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
			add_discard_addrs(sbi, cpc, false);

		cpc->trim_start = trim_start;
	}
	mutex_unlock(&sit_i->sentry_lock);

	set_prefree_as_free_segments(sbi);
}

/*
 * Allocate and initialize the in-memory SIT structures at mount time.
 * Partial allocations are not unwound here — presumably the caller's
 * failure path frees SM_I(sbi)->sit_info; confirm against the mount error
 * path.  Function is truncated at the end of this chunk.
 */
static int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap;
	unsigned int bitmap_size;

	/* allocate memory for SIT information */
	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) *
					sizeof(struct seg_entry), GFP_KERNEL);
	if (!sit_i->sentries)
		return -ENOMEM;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
	if (!sit_i->dirty_sentries_bitmap)
		return -ENOMEM;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		sit_i->sentries[start].ckpt_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map ||
				!sit_i->sentries[start].ckpt_valid_map)
			return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
		/* mirror bitmap used for consistency checking builds */
		sit_i->sentries[start].cur_valid_map_mir
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map_mir)
			return -ENOMEM;
#endif

		if (f2fs_discard_en(sbi)) {
			sit_i->sentries[start].discard_map
				= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
			if (!sit_i->sentries[start].discard_map)
				return -ENOMEM;
		}
	}

	sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
	if (!sit_i->tmp_map)
		return -ENOMEM;

	if (sbi->segs_per_sec > 1) {
		sit_i->sec_entries = f2fs_kvzalloc(MAIN_SECS(sbi) *
					sizeof(struct sec_entry), GFP_KERNEL);
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related with SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from ckeckpoint pack */
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap)
		return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
	sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap_mir)
		return -ENOMEM;
#endif
Yu 2898351df4b2SJaegeuk Kim /* init SIT information */ 2899351df4b2SJaegeuk Kim sit_i->s_ops = &default_salloc_ops; 2900351df4b2SJaegeuk Kim 2901351df4b2SJaegeuk Kim sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr); 2902351df4b2SJaegeuk Kim sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg; 2903c79b7ff1SJaegeuk Kim sit_i->written_valid_blocks = 0; 2904351df4b2SJaegeuk Kim sit_i->bitmap_size = bitmap_size; 2905351df4b2SJaegeuk Kim sit_i->dirty_sentries = 0; 2906351df4b2SJaegeuk Kim sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK; 2907351df4b2SJaegeuk Kim sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time); 2908351df4b2SJaegeuk Kim sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec; 2909351df4b2SJaegeuk Kim mutex_init(&sit_i->sentry_lock); 2910351df4b2SJaegeuk Kim return 0; 2911351df4b2SJaegeuk Kim } 2912351df4b2SJaegeuk Kim 2913351df4b2SJaegeuk Kim static int build_free_segmap(struct f2fs_sb_info *sbi) 2914351df4b2SJaegeuk Kim { 2915351df4b2SJaegeuk Kim struct free_segmap_info *free_i; 2916351df4b2SJaegeuk Kim unsigned int bitmap_size, sec_bitmap_size; 2917351df4b2SJaegeuk Kim 2918351df4b2SJaegeuk Kim /* allocate memory for free segmap information */ 2919351df4b2SJaegeuk Kim free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL); 2920351df4b2SJaegeuk Kim if (!free_i) 2921351df4b2SJaegeuk Kim return -ENOMEM; 2922351df4b2SJaegeuk Kim 2923351df4b2SJaegeuk Kim SM_I(sbi)->free_info = free_i; 2924351df4b2SJaegeuk Kim 29257cd8558bSJaegeuk Kim bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); 292639307a8eSJaegeuk Kim free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL); 2927351df4b2SJaegeuk Kim if (!free_i->free_segmap) 2928351df4b2SJaegeuk Kim return -ENOMEM; 2929351df4b2SJaegeuk Kim 29307cd8558bSJaegeuk Kim sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); 293139307a8eSJaegeuk Kim free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL); 2932351df4b2SJaegeuk Kim if (!free_i->free_secmap) 2933351df4b2SJaegeuk Kim return -ENOMEM; 
2934351df4b2SJaegeuk Kim 2935351df4b2SJaegeuk Kim /* set all segments as dirty temporarily */ 2936351df4b2SJaegeuk Kim memset(free_i->free_segmap, 0xff, bitmap_size); 2937351df4b2SJaegeuk Kim memset(free_i->free_secmap, 0xff, sec_bitmap_size); 2938351df4b2SJaegeuk Kim 2939351df4b2SJaegeuk Kim /* init free segmap information */ 29407cd8558bSJaegeuk Kim free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi)); 2941351df4b2SJaegeuk Kim free_i->free_segments = 0; 2942351df4b2SJaegeuk Kim free_i->free_sections = 0; 29431a118ccfSChao Yu spin_lock_init(&free_i->segmap_lock); 2944351df4b2SJaegeuk Kim return 0; 2945351df4b2SJaegeuk Kim } 2946351df4b2SJaegeuk Kim 2947351df4b2SJaegeuk Kim static int build_curseg(struct f2fs_sb_info *sbi) 2948351df4b2SJaegeuk Kim { 29491042d60fSNamjae Jeon struct curseg_info *array; 2950351df4b2SJaegeuk Kim int i; 2951351df4b2SJaegeuk Kim 2952b434babfSFabian Frederick array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL); 2953351df4b2SJaegeuk Kim if (!array) 2954351df4b2SJaegeuk Kim return -ENOMEM; 2955351df4b2SJaegeuk Kim 2956351df4b2SJaegeuk Kim SM_I(sbi)->curseg_array = array; 2957351df4b2SJaegeuk Kim 2958351df4b2SJaegeuk Kim for (i = 0; i < NR_CURSEG_TYPE; i++) { 2959351df4b2SJaegeuk Kim mutex_init(&array[i].curseg_mutex); 296009cbfeafSKirill A. 
Shutemov array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL); 2961351df4b2SJaegeuk Kim if (!array[i].sum_blk) 2962351df4b2SJaegeuk Kim return -ENOMEM; 2963b7ad7512SChao Yu init_rwsem(&array[i].journal_rwsem); 2964b7ad7512SChao Yu array[i].journal = kzalloc(sizeof(struct f2fs_journal), 2965b7ad7512SChao Yu GFP_KERNEL); 2966b7ad7512SChao Yu if (!array[i].journal) 2967b7ad7512SChao Yu return -ENOMEM; 2968351df4b2SJaegeuk Kim array[i].segno = NULL_SEGNO; 2969351df4b2SJaegeuk Kim array[i].next_blkoff = 0; 2970351df4b2SJaegeuk Kim } 2971351df4b2SJaegeuk Kim return restore_curseg_summaries(sbi); 2972351df4b2SJaegeuk Kim } 2973351df4b2SJaegeuk Kim 2974351df4b2SJaegeuk Kim static void build_sit_entries(struct f2fs_sb_info *sbi) 2975351df4b2SJaegeuk Kim { 2976351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 2977351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 2978b7ad7512SChao Yu struct f2fs_journal *journal = curseg->journal; 29799c094040SYunlei He struct seg_entry *se; 29809c094040SYunlei He struct f2fs_sit_entry sit; 298174de593aSChao Yu int sit_blk_cnt = SIT_BLK_CNT(sbi); 298274de593aSChao Yu unsigned int i, start, end; 298374de593aSChao Yu unsigned int readed, start_blk = 0; 2984351df4b2SJaegeuk Kim 298574de593aSChao Yu do { 2986664ba972SJaegeuk Kim readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES, 2987664ba972SJaegeuk Kim META_SIT, true); 298874de593aSChao Yu 298974de593aSChao Yu start = start_blk * sit_i->sents_per_block; 299074de593aSChao Yu end = (start_blk + readed) * sit_i->sents_per_block; 299174de593aSChao Yu 29927cd8558bSJaegeuk Kim for (; start < end && start < MAIN_SEGS(sbi); start++) { 2993351df4b2SJaegeuk Kim struct f2fs_sit_block *sit_blk; 2994351df4b2SJaegeuk Kim struct page *page; 2995351df4b2SJaegeuk Kim 29969c094040SYunlei He se = &sit_i->sentries[start]; 2997351df4b2SJaegeuk Kim page = get_current_sit_page(sbi, start); 2998351df4b2SJaegeuk Kim sit_blk = (struct f2fs_sit_block *)page_address(page); 
2999351df4b2SJaegeuk Kim sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)]; 3000351df4b2SJaegeuk Kim f2fs_put_page(page, 1); 3001d600af23SChao Yu 3002351df4b2SJaegeuk Kim check_block_count(sbi, start, &sit); 3003351df4b2SJaegeuk Kim seg_info_from_raw_sit(se, &sit); 3004a66cdd98SJaegeuk Kim 3005a66cdd98SJaegeuk Kim /* build discard map only one time */ 30063e025740SJaegeuk Kim if (f2fs_discard_en(sbi)) { 30073e025740SJaegeuk Kim memcpy(se->discard_map, se->cur_valid_map, 30083e025740SJaegeuk Kim SIT_VBLOCK_MAP_SIZE); 30093e025740SJaegeuk Kim sbi->discard_blks += sbi->blocks_per_seg - 30103e025740SJaegeuk Kim se->valid_blocks; 30113e025740SJaegeuk Kim } 3012a66cdd98SJaegeuk Kim 3013d600af23SChao Yu if (sbi->segs_per_sec > 1) 3014d600af23SChao Yu get_sec_entry(sbi, start)->valid_blocks += 3015d600af23SChao Yu se->valid_blocks; 3016351df4b2SJaegeuk Kim } 301774de593aSChao Yu start_blk += readed; 301874de593aSChao Yu } while (start_blk < sit_blk_cnt); 3019d600af23SChao Yu 3020d600af23SChao Yu down_read(&curseg->journal_rwsem); 3021d600af23SChao Yu for (i = 0; i < sits_in_cursum(journal); i++) { 3022d600af23SChao Yu unsigned int old_valid_blocks; 3023d600af23SChao Yu 3024d600af23SChao Yu start = le32_to_cpu(segno_in_journal(journal, i)); 3025d600af23SChao Yu se = &sit_i->sentries[start]; 3026d600af23SChao Yu sit = sit_in_journal(journal, i); 3027d600af23SChao Yu 3028d600af23SChao Yu old_valid_blocks = se->valid_blocks; 3029d600af23SChao Yu 3030d600af23SChao Yu check_block_count(sbi, start, &sit); 3031d600af23SChao Yu seg_info_from_raw_sit(se, &sit); 3032d600af23SChao Yu 3033d600af23SChao Yu if (f2fs_discard_en(sbi)) { 3034d600af23SChao Yu memcpy(se->discard_map, se->cur_valid_map, 3035d600af23SChao Yu SIT_VBLOCK_MAP_SIZE); 3036d600af23SChao Yu sbi->discard_blks += old_valid_blocks - 3037d600af23SChao Yu se->valid_blocks; 3038d600af23SChao Yu } 3039d600af23SChao Yu 3040d600af23SChao Yu if (sbi->segs_per_sec > 1) 3041d600af23SChao Yu get_sec_entry(sbi, 
start)->valid_blocks += 3042d600af23SChao Yu se->valid_blocks - old_valid_blocks; 3043d600af23SChao Yu } 3044d600af23SChao Yu up_read(&curseg->journal_rwsem); 3045351df4b2SJaegeuk Kim } 3046351df4b2SJaegeuk Kim 3047351df4b2SJaegeuk Kim static void init_free_segmap(struct f2fs_sb_info *sbi) 3048351df4b2SJaegeuk Kim { 3049351df4b2SJaegeuk Kim unsigned int start; 3050351df4b2SJaegeuk Kim int type; 3051351df4b2SJaegeuk Kim 30527cd8558bSJaegeuk Kim for (start = 0; start < MAIN_SEGS(sbi); start++) { 3053351df4b2SJaegeuk Kim struct seg_entry *sentry = get_seg_entry(sbi, start); 3054351df4b2SJaegeuk Kim if (!sentry->valid_blocks) 3055351df4b2SJaegeuk Kim __set_free(sbi, start); 3056c79b7ff1SJaegeuk Kim else 3057c79b7ff1SJaegeuk Kim SIT_I(sbi)->written_valid_blocks += 3058c79b7ff1SJaegeuk Kim sentry->valid_blocks; 3059351df4b2SJaegeuk Kim } 3060351df4b2SJaegeuk Kim 3061351df4b2SJaegeuk Kim /* set use the current segments */ 3062351df4b2SJaegeuk Kim for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) { 3063351df4b2SJaegeuk Kim struct curseg_info *curseg_t = CURSEG_I(sbi, type); 3064351df4b2SJaegeuk Kim __set_test_and_inuse(sbi, curseg_t->segno); 3065351df4b2SJaegeuk Kim } 3066351df4b2SJaegeuk Kim } 3067351df4b2SJaegeuk Kim 3068351df4b2SJaegeuk Kim static void init_dirty_segmap(struct f2fs_sb_info *sbi) 3069351df4b2SJaegeuk Kim { 3070351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 3071351df4b2SJaegeuk Kim struct free_segmap_info *free_i = FREE_I(sbi); 30727cd8558bSJaegeuk Kim unsigned int segno = 0, offset = 0; 3073351df4b2SJaegeuk Kim unsigned short valid_blocks; 3074351df4b2SJaegeuk Kim 30758736fbf0SNamjae Jeon while (1) { 3076351df4b2SJaegeuk Kim /* find dirty segment based on free segmap */ 30777cd8558bSJaegeuk Kim segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset); 30787cd8558bSJaegeuk Kim if (segno >= MAIN_SEGS(sbi)) 3079351df4b2SJaegeuk Kim break; 3080351df4b2SJaegeuk Kim offset = segno + 1; 3081302bd348SJaegeuk Kim valid_blocks 
= get_valid_blocks(sbi, segno, false); 3082ec325b52SJaegeuk Kim if (valid_blocks == sbi->blocks_per_seg || !valid_blocks) 3083351df4b2SJaegeuk Kim continue; 3084ec325b52SJaegeuk Kim if (valid_blocks > sbi->blocks_per_seg) { 3085ec325b52SJaegeuk Kim f2fs_bug_on(sbi, 1); 3086ec325b52SJaegeuk Kim continue; 3087ec325b52SJaegeuk Kim } 3088351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock); 3089351df4b2SJaegeuk Kim __locate_dirty_segment(sbi, segno, DIRTY); 3090351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock); 3091351df4b2SJaegeuk Kim } 3092351df4b2SJaegeuk Kim } 3093351df4b2SJaegeuk Kim 30945ec4e49fSJaegeuk Kim static int init_victim_secmap(struct f2fs_sb_info *sbi) 3095351df4b2SJaegeuk Kim { 3096351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 30977cd8558bSJaegeuk Kim unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); 3098351df4b2SJaegeuk Kim 309939307a8eSJaegeuk Kim dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL); 31005ec4e49fSJaegeuk Kim if (!dirty_i->victim_secmap) 3101351df4b2SJaegeuk Kim return -ENOMEM; 3102351df4b2SJaegeuk Kim return 0; 3103351df4b2SJaegeuk Kim } 3104351df4b2SJaegeuk Kim 3105351df4b2SJaegeuk Kim static int build_dirty_segmap(struct f2fs_sb_info *sbi) 3106351df4b2SJaegeuk Kim { 3107351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i; 3108351df4b2SJaegeuk Kim unsigned int bitmap_size, i; 3109351df4b2SJaegeuk Kim 3110351df4b2SJaegeuk Kim /* allocate memory for dirty segments list information */ 3111351df4b2SJaegeuk Kim dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL); 3112351df4b2SJaegeuk Kim if (!dirty_i) 3113351df4b2SJaegeuk Kim return -ENOMEM; 3114351df4b2SJaegeuk Kim 3115351df4b2SJaegeuk Kim SM_I(sbi)->dirty_info = dirty_i; 3116351df4b2SJaegeuk Kim mutex_init(&dirty_i->seglist_lock); 3117351df4b2SJaegeuk Kim 31187cd8558bSJaegeuk Kim bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); 3119351df4b2SJaegeuk Kim 3120351df4b2SJaegeuk Kim for (i = 0; i < NR_DIRTY_TYPE; i++) 
{ 312139307a8eSJaegeuk Kim dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL); 3122351df4b2SJaegeuk Kim if (!dirty_i->dirty_segmap[i]) 3123351df4b2SJaegeuk Kim return -ENOMEM; 3124351df4b2SJaegeuk Kim } 3125351df4b2SJaegeuk Kim 3126351df4b2SJaegeuk Kim init_dirty_segmap(sbi); 31275ec4e49fSJaegeuk Kim return init_victim_secmap(sbi); 3128351df4b2SJaegeuk Kim } 3129351df4b2SJaegeuk Kim 31300a8165d7SJaegeuk Kim /* 3131351df4b2SJaegeuk Kim * Update min, max modified time for cost-benefit GC algorithm 3132351df4b2SJaegeuk Kim */ 3133351df4b2SJaegeuk Kim static void init_min_max_mtime(struct f2fs_sb_info *sbi) 3134351df4b2SJaegeuk Kim { 3135351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 3136351df4b2SJaegeuk Kim unsigned int segno; 3137351df4b2SJaegeuk Kim 3138351df4b2SJaegeuk Kim mutex_lock(&sit_i->sentry_lock); 3139351df4b2SJaegeuk Kim 3140351df4b2SJaegeuk Kim sit_i->min_mtime = LLONG_MAX; 3141351df4b2SJaegeuk Kim 31427cd8558bSJaegeuk Kim for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) { 3143351df4b2SJaegeuk Kim unsigned int i; 3144351df4b2SJaegeuk Kim unsigned long long mtime = 0; 3145351df4b2SJaegeuk Kim 3146351df4b2SJaegeuk Kim for (i = 0; i < sbi->segs_per_sec; i++) 3147351df4b2SJaegeuk Kim mtime += get_seg_entry(sbi, segno + i)->mtime; 3148351df4b2SJaegeuk Kim 3149351df4b2SJaegeuk Kim mtime = div_u64(mtime, sbi->segs_per_sec); 3150351df4b2SJaegeuk Kim 3151351df4b2SJaegeuk Kim if (sit_i->min_mtime > mtime) 3152351df4b2SJaegeuk Kim sit_i->min_mtime = mtime; 3153351df4b2SJaegeuk Kim } 3154351df4b2SJaegeuk Kim sit_i->max_mtime = get_mtime(sbi); 3155351df4b2SJaegeuk Kim mutex_unlock(&sit_i->sentry_lock); 3156351df4b2SJaegeuk Kim } 3157351df4b2SJaegeuk Kim 3158351df4b2SJaegeuk Kim int build_segment_manager(struct f2fs_sb_info *sbi) 3159351df4b2SJaegeuk Kim { 3160351df4b2SJaegeuk Kim struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 3161351df4b2SJaegeuk Kim struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 
31621042d60fSNamjae Jeon struct f2fs_sm_info *sm_info; 3163351df4b2SJaegeuk Kim int err; 3164351df4b2SJaegeuk Kim 3165351df4b2SJaegeuk Kim sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL); 3166351df4b2SJaegeuk Kim if (!sm_info) 3167351df4b2SJaegeuk Kim return -ENOMEM; 3168351df4b2SJaegeuk Kim 3169351df4b2SJaegeuk Kim /* init sm info */ 3170351df4b2SJaegeuk Kim sbi->sm_info = sm_info; 3171351df4b2SJaegeuk Kim sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); 3172351df4b2SJaegeuk Kim sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); 3173351df4b2SJaegeuk Kim sm_info->segment_count = le32_to_cpu(raw_super->segment_count); 3174351df4b2SJaegeuk Kim sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); 3175351df4b2SJaegeuk Kim sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); 3176351df4b2SJaegeuk Kim sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main); 3177351df4b2SJaegeuk Kim sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); 317858c41035SJaegeuk Kim sm_info->rec_prefree_segments = sm_info->main_segments * 317958c41035SJaegeuk Kim DEF_RECLAIM_PREFREE_SEGMENTS / 100; 318044a83499SJaegeuk Kim if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS) 318144a83499SJaegeuk Kim sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS; 318244a83499SJaegeuk Kim 318352763a4bSJaegeuk Kim if (!test_opt(sbi, LFS)) 31849b5f136fSJaegeuk Kim sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC; 3185216fbd64SJaegeuk Kim sm_info->min_ipu_util = DEF_MIN_IPU_UTIL; 3186c1ce1b02SJaegeuk Kim sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS; 3187ef095d19SJaegeuk Kim sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS; 3188351df4b2SJaegeuk Kim 3189bba681cbSJaegeuk Kim sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS; 3190bba681cbSJaegeuk Kim 3191184a5cd2SChao Yu INIT_LIST_HEAD(&sm_info->sit_entry_set); 3192184a5cd2SChao Yu 3193b270ad6fSGu Zheng if (test_opt(sbi, FLUSH_MERGE) && 
!f2fs_readonly(sbi->sb)) { 31942163d198SGu Zheng err = create_flush_cmd_control(sbi); 31952163d198SGu Zheng if (err) 3196a688b9d9SGu Zheng return err; 3197a688b9d9SGu Zheng } 31986b4afdd7SJaegeuk Kim 31990b54fb84SJaegeuk Kim err = create_discard_cmd_control(sbi); 32000b54fb84SJaegeuk Kim if (err) 32010b54fb84SJaegeuk Kim return err; 32020b54fb84SJaegeuk Kim 3203351df4b2SJaegeuk Kim err = build_sit_info(sbi); 3204351df4b2SJaegeuk Kim if (err) 3205351df4b2SJaegeuk Kim return err; 3206351df4b2SJaegeuk Kim err = build_free_segmap(sbi); 3207351df4b2SJaegeuk Kim if (err) 3208351df4b2SJaegeuk Kim return err; 3209351df4b2SJaegeuk Kim err = build_curseg(sbi); 3210351df4b2SJaegeuk Kim if (err) 3211351df4b2SJaegeuk Kim return err; 3212351df4b2SJaegeuk Kim 3213351df4b2SJaegeuk Kim /* reinit free segmap based on SIT */ 3214351df4b2SJaegeuk Kim build_sit_entries(sbi); 3215351df4b2SJaegeuk Kim 3216351df4b2SJaegeuk Kim init_free_segmap(sbi); 3217351df4b2SJaegeuk Kim err = build_dirty_segmap(sbi); 3218351df4b2SJaegeuk Kim if (err) 3219351df4b2SJaegeuk Kim return err; 3220351df4b2SJaegeuk Kim 3221351df4b2SJaegeuk Kim init_min_max_mtime(sbi); 3222351df4b2SJaegeuk Kim return 0; 3223351df4b2SJaegeuk Kim } 3224351df4b2SJaegeuk Kim 3225351df4b2SJaegeuk Kim static void discard_dirty_segmap(struct f2fs_sb_info *sbi, 3226351df4b2SJaegeuk Kim enum dirty_type dirty_type) 3227351df4b2SJaegeuk Kim { 3228351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 3229351df4b2SJaegeuk Kim 3230351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock); 323139307a8eSJaegeuk Kim kvfree(dirty_i->dirty_segmap[dirty_type]); 3232351df4b2SJaegeuk Kim dirty_i->nr_dirty[dirty_type] = 0; 3233351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock); 3234351df4b2SJaegeuk Kim } 3235351df4b2SJaegeuk Kim 32365ec4e49fSJaegeuk Kim static void destroy_victim_secmap(struct f2fs_sb_info *sbi) 3237351df4b2SJaegeuk Kim { 3238351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 
323939307a8eSJaegeuk Kim kvfree(dirty_i->victim_secmap); 3240351df4b2SJaegeuk Kim } 3241351df4b2SJaegeuk Kim 3242351df4b2SJaegeuk Kim static void destroy_dirty_segmap(struct f2fs_sb_info *sbi) 3243351df4b2SJaegeuk Kim { 3244351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 3245351df4b2SJaegeuk Kim int i; 3246351df4b2SJaegeuk Kim 3247351df4b2SJaegeuk Kim if (!dirty_i) 3248351df4b2SJaegeuk Kim return; 3249351df4b2SJaegeuk Kim 3250351df4b2SJaegeuk Kim /* discard pre-free/dirty segments list */ 3251351df4b2SJaegeuk Kim for (i = 0; i < NR_DIRTY_TYPE; i++) 3252351df4b2SJaegeuk Kim discard_dirty_segmap(sbi, i); 3253351df4b2SJaegeuk Kim 32545ec4e49fSJaegeuk Kim destroy_victim_secmap(sbi); 3255351df4b2SJaegeuk Kim SM_I(sbi)->dirty_info = NULL; 3256351df4b2SJaegeuk Kim kfree(dirty_i); 3257351df4b2SJaegeuk Kim } 3258351df4b2SJaegeuk Kim 3259351df4b2SJaegeuk Kim static void destroy_curseg(struct f2fs_sb_info *sbi) 3260351df4b2SJaegeuk Kim { 3261351df4b2SJaegeuk Kim struct curseg_info *array = SM_I(sbi)->curseg_array; 3262351df4b2SJaegeuk Kim int i; 3263351df4b2SJaegeuk Kim 3264351df4b2SJaegeuk Kim if (!array) 3265351df4b2SJaegeuk Kim return; 3266351df4b2SJaegeuk Kim SM_I(sbi)->curseg_array = NULL; 3267b7ad7512SChao Yu for (i = 0; i < NR_CURSEG_TYPE; i++) { 3268351df4b2SJaegeuk Kim kfree(array[i].sum_blk); 3269b7ad7512SChao Yu kfree(array[i].journal); 3270b7ad7512SChao Yu } 3271351df4b2SJaegeuk Kim kfree(array); 3272351df4b2SJaegeuk Kim } 3273351df4b2SJaegeuk Kim 3274351df4b2SJaegeuk Kim static void destroy_free_segmap(struct f2fs_sb_info *sbi) 3275351df4b2SJaegeuk Kim { 3276351df4b2SJaegeuk Kim struct free_segmap_info *free_i = SM_I(sbi)->free_info; 3277351df4b2SJaegeuk Kim if (!free_i) 3278351df4b2SJaegeuk Kim return; 3279351df4b2SJaegeuk Kim SM_I(sbi)->free_info = NULL; 328039307a8eSJaegeuk Kim kvfree(free_i->free_segmap); 328139307a8eSJaegeuk Kim kvfree(free_i->free_secmap); 3282351df4b2SJaegeuk Kim kfree(free_i); 3283351df4b2SJaegeuk Kim } 
3284351df4b2SJaegeuk Kim 3285351df4b2SJaegeuk Kim static void destroy_sit_info(struct f2fs_sb_info *sbi) 3286351df4b2SJaegeuk Kim { 3287351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 3288351df4b2SJaegeuk Kim unsigned int start; 3289351df4b2SJaegeuk Kim 3290351df4b2SJaegeuk Kim if (!sit_i) 3291351df4b2SJaegeuk Kim return; 3292351df4b2SJaegeuk Kim 3293351df4b2SJaegeuk Kim if (sit_i->sentries) { 32947cd8558bSJaegeuk Kim for (start = 0; start < MAIN_SEGS(sbi); start++) { 3295351df4b2SJaegeuk Kim kfree(sit_i->sentries[start].cur_valid_map); 3296355e7891SChao Yu #ifdef CONFIG_F2FS_CHECK_FS 3297355e7891SChao Yu kfree(sit_i->sentries[start].cur_valid_map_mir); 3298355e7891SChao Yu #endif 3299351df4b2SJaegeuk Kim kfree(sit_i->sentries[start].ckpt_valid_map); 3300a66cdd98SJaegeuk Kim kfree(sit_i->sentries[start].discard_map); 3301351df4b2SJaegeuk Kim } 3302351df4b2SJaegeuk Kim } 330360a3b782SJaegeuk Kim kfree(sit_i->tmp_map); 330460a3b782SJaegeuk Kim 330539307a8eSJaegeuk Kim kvfree(sit_i->sentries); 330639307a8eSJaegeuk Kim kvfree(sit_i->sec_entries); 330739307a8eSJaegeuk Kim kvfree(sit_i->dirty_sentries_bitmap); 3308351df4b2SJaegeuk Kim 3309351df4b2SJaegeuk Kim SM_I(sbi)->sit_info = NULL; 3310351df4b2SJaegeuk Kim kfree(sit_i->sit_bitmap); 3311ae27d62eSChao Yu #ifdef CONFIG_F2FS_CHECK_FS 3312ae27d62eSChao Yu kfree(sit_i->sit_bitmap_mir); 3313ae27d62eSChao Yu #endif 3314351df4b2SJaegeuk Kim kfree(sit_i); 3315351df4b2SJaegeuk Kim } 3316351df4b2SJaegeuk Kim 3317351df4b2SJaegeuk Kim void destroy_segment_manager(struct f2fs_sb_info *sbi) 3318351df4b2SJaegeuk Kim { 3319351df4b2SJaegeuk Kim struct f2fs_sm_info *sm_info = SM_I(sbi); 3320a688b9d9SGu Zheng 33213b03f724SChao Yu if (!sm_info) 33223b03f724SChao Yu return; 33235eba8c5dSJaegeuk Kim destroy_flush_cmd_control(sbi, true); 3324f099405fSChao Yu destroy_discard_cmd_control(sbi); 3325351df4b2SJaegeuk Kim destroy_dirty_segmap(sbi); 3326351df4b2SJaegeuk Kim destroy_curseg(sbi); 3327351df4b2SJaegeuk Kim 
destroy_free_segmap(sbi); 3328351df4b2SJaegeuk Kim destroy_sit_info(sbi); 3329351df4b2SJaegeuk Kim sbi->sm_info = NULL; 3330351df4b2SJaegeuk Kim kfree(sm_info); 3331351df4b2SJaegeuk Kim } 33327fd9e544SJaegeuk Kim 33337fd9e544SJaegeuk Kim int __init create_segment_manager_caches(void) 33347fd9e544SJaegeuk Kim { 33357fd9e544SJaegeuk Kim discard_entry_slab = f2fs_kmem_cache_create("discard_entry", 3336e8512d2eSGu Zheng sizeof(struct discard_entry)); 33377fd9e544SJaegeuk Kim if (!discard_entry_slab) 3338184a5cd2SChao Yu goto fail; 3339184a5cd2SChao Yu 3340b01a9201SJaegeuk Kim discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd", 3341b01a9201SJaegeuk Kim sizeof(struct discard_cmd)); 3342b01a9201SJaegeuk Kim if (!discard_cmd_slab) 33436ab2a308SChao Yu goto destroy_discard_entry; 3344275b66b0SChao Yu 3345184a5cd2SChao Yu sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set", 3346c9ee0085SChangman Lee sizeof(struct sit_entry_set)); 3347184a5cd2SChao Yu if (!sit_entry_set_slab) 3348b01a9201SJaegeuk Kim goto destroy_discard_cmd; 334988b88a66SJaegeuk Kim 335088b88a66SJaegeuk Kim inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry", 335188b88a66SJaegeuk Kim sizeof(struct inmem_pages)); 335288b88a66SJaegeuk Kim if (!inmem_entry_slab) 335388b88a66SJaegeuk Kim goto destroy_sit_entry_set; 33547fd9e544SJaegeuk Kim return 0; 3355184a5cd2SChao Yu 335688b88a66SJaegeuk Kim destroy_sit_entry_set: 335788b88a66SJaegeuk Kim kmem_cache_destroy(sit_entry_set_slab); 3358b01a9201SJaegeuk Kim destroy_discard_cmd: 3359b01a9201SJaegeuk Kim kmem_cache_destroy(discard_cmd_slab); 33606ab2a308SChao Yu destroy_discard_entry: 3361184a5cd2SChao Yu kmem_cache_destroy(discard_entry_slab); 3362184a5cd2SChao Yu fail: 3363184a5cd2SChao Yu return -ENOMEM; 33647fd9e544SJaegeuk Kim } 33657fd9e544SJaegeuk Kim 33667fd9e544SJaegeuk Kim void destroy_segment_manager_caches(void) 33677fd9e544SJaegeuk Kim { 3368184a5cd2SChao Yu kmem_cache_destroy(sit_entry_set_slab); 3369b01a9201SJaegeuk Kim 
kmem_cache_destroy(discard_cmd_slab); 33707fd9e544SJaegeuk Kim kmem_cache_destroy(discard_entry_slab); 337188b88a66SJaegeuk Kim kmem_cache_destroy(inmem_entry_slab); 33727fd9e544SJaegeuk Kim } 3373