node.c: diff between 4ba24fef3eb3b142197135223b90ced2f319cd53 (old) and caf0047e7e1e60a7ad1d655d3b81b32e2dfb6095 (new)
 /*
  * fs/f2fs/node.c
  *
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
--- 5 unchanged lines hidden ---
 #include <linux/backing-dev.h>
 #include <linux/blkdev.h>
 #include <linux/pagevec.h>
 #include <linux/swap.h>

 #include "f2fs.h"
 #include "node.h"
 #include "segment.h"
+#include "trace.h"
 #include <trace/events/f2fs.h>

 #define on_build_free_nids(nmi) mutex_is_locked(&nm_i->build_lock)

 static struct kmem_cache *nat_entry_slab;
 static struct kmem_cache *free_nid_slab;
 static struct kmem_cache *nat_entry_set_slab;

--- 22 unchanged lines hidden ---
 	} else if (type == DIRTY_DENTS) {
 		if (sbi->sb->s_bdi->dirty_exceeded)
 			return false;
 		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
 	} else if (type == INO_ENTRIES) {
 		int i;

-		if (sbi->sb->s_bdi->dirty_exceeded)
-			return false;
 		for (i = 0; i <= UPDATE_INO; i++)
 			mem_size += (sbi->im[i].ino_num *
 				sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
+	} else {
+		if (sbi->sb->s_bdi->dirty_exceeded)
+			return false;
 	}
 	return res;
 }

 static void clear_node_page_dirty(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 	unsigned int long flags;
--- 189 unchanged lines hidden ---
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct nat_entry *e;

 	down_write(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, ni->nid);
 	if (!e) {
 		e = grab_nat_entry(nm_i, ni->nid);
-		e->ni = *ni;
+		copy_node_info(&e->ni, ni);
 		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
 	} else if (new_blkaddr == NEW_ADDR) {
 		/*
 		 * when nid is reallocated,
 		 * previous nat entry can be remained in nat cache.
 		 * So, reinitialize it with new information.
 		 */
-		e->ni = *ni;
+		copy_node_info(&e->ni, ni);
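In both hunks above the plain struct assignment is replaced by a copy_node_info() helper, whose definition is not part of this diff. A minimal sketch of the assumed shape, copying only the per-node fields so that any cache-private state already held in the destination nat_entry is left untouched:

/* Assumed shape of copy_node_info(); the real definition lives outside node.c. */
static inline void copy_node_info(struct node_info *dst,
					struct node_info *src)
{
	dst->nid = src->nid;
	dst->ino = src->ino;
	dst->blk_addr = src->blk_addr;
	dst->version = src->version;
	/* any cache-only flags are deliberately not copied */
}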
 		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
 	}

 	/* sanity check */
 	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
 	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
 			new_blkaddr == NULL_ADDR);
 	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
--- 53 unchanged lines hidden ---
 	struct f2fs_summary_block *sum = curseg->sum_blk;
 	nid_t start_nid = START_NID(nid);
 	struct f2fs_nat_block *nat_blk;
 	struct page *page = NULL;
 	struct f2fs_nat_entry ne;
 	struct nat_entry *e;
 	int i;

-	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
 	ni->nid = nid;

 	/* Check nat cache */
 	down_read(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, nid);
 	if (e) {
 		ni->ino = nat_get_ino(e);
 		ni->blk_addr = nat_get_blkaddr(e);
 		ni->version = nat_get_version(e);
 	}
 	up_read(&nm_i->nat_tree_lock);
 	if (e)
 		return;

+	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
+
 	/* Check current segment summary */
 	mutex_lock(&curseg->curseg_mutex);
 	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
 	if (i >= 0) {
 		ne = nat_in_journal(sum, i);
 		node_info_from_raw_nat(ni, &ne);
 	}
 	mutex_unlock(&curseg->curseg_mutex);
--- 208 unchanged lines hidden ---
 	if (dn->nid == dn->inode->i_ino) {
 		remove_orphan_inode(sbi, dn->nid);
 		dec_valid_inode_count(sbi);
 	} else {
 		sync_inode_page(dn);
 	}
 invalidate:
 	clear_node_page_dirty(dn->node_page);
-	F2FS_SET_SB_DIRT(sbi);
+	set_sbi_flag(sbi, SBI_IS_DIRTY);
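The old F2FS_SET_SB_DIRT() macro (and, further down, the sbi->por_doing test) is replaced by generic superblock-status helpers keyed by an SBI_* flag. Their definitions are outside this file; a minimal sketch, assuming the flags share one bitmask word in struct f2fs_sb_info (the field name s_flag is a guess):

/* Hypothetical helpers; the real ones live in f2fs.h. */
static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
	return sbi->s_flag & (0x01 << type);	/* s_flag: assumed bitmask field */
}

static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	sbi->s_flag |= (0x01 << type);
}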

 	f2fs_put_page(dn->node_page, 1);

 	invalidate_mapping_pages(NODE_MAPPING(sbi),
 			dn->node_page->index, dn->node_page->index);

 	dn->node_page = NULL;
 	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
--- 374 unchanged lines hidden ---
  * 0: f2fs_put_page(page, 0)
  * LOCKED_PAGE: f2fs_put_page(page, 1)
  * error: nothing
  */
 static int read_node_page(struct page *page, int rw)
 {
 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
 	struct node_info ni;
+	struct f2fs_io_info fio = {
+		.type = NODE,
+		.rw = rw,
+	};

 	get_node_info(sbi, page->index, &ni);

 	if (unlikely(ni.blk_addr == NULL_ADDR)) {
 		f2fs_put_page(page, 1);
 		return -ENOENT;
 	}

 	if (PageUptodate(page))
 		return LOCKED_PAGE;

-	return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
+	fio.blk_addr = ni.blk_addr;
+	return f2fs_submit_page_bio(sbi, page, &fio);
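With this change the block address travels inside struct f2fs_io_info instead of being a separate argument to f2fs_submit_page_bio(). Only the three members used in this diff are visible here; a sketch of the assumed minimal shape of the struct (the real definition may carry more fields and use an enum for the type):

/* Assumed minimal layout of struct f2fs_io_info as used in this file. */
struct f2fs_io_info {
	int type;		/* page type: NODE or META in this file */
	int rw;			/* READ/WRITE plus REQ_* flags */
	block_t blk_addr;	/* target block address, filled in at submit time */
};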
 }

 /*
  * Readahead a node page
  */
 void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
 {
 	struct page *apage;
--- 264 unchanged lines hidden ---
 	return ret;
 }

 static int f2fs_write_node_page(struct page *page,
 					struct writeback_control *wbc)
 {
 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
 	nid_t nid;
-	block_t new_addr;
 	struct node_info ni;
 	struct f2fs_io_info fio = {
 		.type = NODE,
 		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
 	};

 	trace_f2fs_writepage(page, NODE);

-	if (unlikely(sbi->por_doing))
+	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		goto redirty_out;
 	if (unlikely(f2fs_cp_error(sbi)))
 		goto redirty_out;

 	f2fs_wait_on_page_writeback(page, NODE);

 	/* get old block addr of this node page */
 	nid = nid_of_node(page);
--- 9 unchanged lines hidden ---
 	}

 	if (wbc->for_reclaim) {
 		if (!down_read_trylock(&sbi->node_write))
 			goto redirty_out;
 	} else {
 		down_read(&sbi->node_write);
 	}
+
 	set_page_writeback(page);
-	write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
-	set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page));
+	fio.blk_addr = ni.blk_addr;
+	write_node_page(sbi, page, nid, &fio);
+	set_node_addr(sbi, &ni, fio.blk_addr, is_fsync_dnode(page));
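On the write path fio.blk_addr also replaces the separate new_addr output argument. The diff only shows that the field is loaded with the old address before the call and read back afterwards; the natural reading is that write_node_page() overwrites it with the newly allocated block. The same three lines, annotated under that assumption:

/* Annotated copy of the new write-path call sequence (semantics inferred). */
fio.blk_addr = ni.blk_addr;			/* in: current on-disk location */
write_node_page(sbi, page, nid, &fio);		/* allocates a new block, writes the page,
						 * and (assumed) updates fio.blk_addr */
set_node_addr(sbi, &ni, fio.blk_addr,		/* out: record the new location in the NAT cache */
					is_fsync_dnode(page));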
 	dec_page_count(sbi, F2FS_DIRTY_NODES);
 	up_read(&sbi->node_write);
 	unlock_page(page);

 	if (wbc->for_reclaim)
 		f2fs_submit_merged_bio(sbi, NODE, WRITE);

 	return 0;
--- 33 unchanged lines hidden ---
 {
 	trace_f2fs_set_page_dirty(page, NODE);

 	SetPageUptodate(page);
 	if (!PageDirty(page)) {
 		__set_page_dirty_nobuffers(page);
 		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
 		SetPagePrivate(page);
+		f2fs_trace_pid(page);
 		return 1;
 	}
 	return 0;
 }

 static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
 					unsigned int length)
 {
--- 355 unchanged lines hidden ---
 	WARN_ON(1);
 	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
 	inc_valid_inode_count(sbi);
 	set_page_dirty(ipage);
 	f2fs_put_page(ipage, 1);
 	return 0;
 }

-/*
- * ra_sum_pages() merge contiguous pages into one bio and submit.
- * these pre-read pages are allocated in bd_inode's mapping tree.
- */
-static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages,
-				int start, int nrpages)
-{
-	struct inode *inode = sbi->sb->s_bdev->bd_inode;
-	struct address_space *mapping = inode->i_mapping;
-	int i, page_idx = start;
-	struct f2fs_io_info fio = {
-		.type = META,
-		.rw = READ_SYNC | REQ_META | REQ_PRIO
-	};
-
-	for (i = 0; page_idx < start + nrpages; page_idx++, i++) {
-		/* alloc page in bd_inode for reading node summary info */
-		pages[i] = grab_cache_page(mapping, page_idx);
-		if (!pages[i])
-			break;
-		f2fs_submit_page_mbio(sbi, pages[i], page_idx, &fio);
-	}
-
-	f2fs_submit_merged_bio(sbi, META, READ);
-	return i;
-}
-
 int restore_node_summary(struct f2fs_sb_info *sbi,
 			unsigned int segno, struct f2fs_summary_block *sum)
 {
 	struct f2fs_node *rn;
 	struct f2fs_summary *sum_entry;
-	struct inode *inode = sbi->sb->s_bdev->bd_inode;
 	block_t addr;
 	int bio_blocks = MAX_BIO_BLOCKS(sbi);
-	struct page *pages[bio_blocks];
-	int i, idx, last_offset, nrpages, err = 0;
+	int i, idx, last_offset, nrpages;

 	/* scan the node segment */
 	last_offset = sbi->blocks_per_seg;
 	addr = START_BLOCK(sbi, segno);
 	sum_entry = &sum->entries[0];

-	for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
+	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
 		nrpages = min(last_offset - i, bio_blocks);

 		/* readahead node pages */
-		nrpages = ra_sum_pages(sbi, pages, addr, nrpages);
-		if (!nrpages)
-			return -ENOMEM;
+		ra_meta_pages(sbi, addr, nrpages, META_POR);
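The removed ra_sum_pages() above built its readahead against the block device inode's mapping; the scan now goes through the filesystem's generic meta readahead with a new META_POR type, so the pages live in the meta inode's cache (see the META_MAPPING() invalidation below). ra_meta_pages() is defined elsewhere; a rough sketch of the assumed shape, essentially the old ra_sum_pages() retargeted at META_MAPPING() (the three-argument f2fs_submit_page_mbio() form is also an assumption, matching the f2fs_submit_page_bio() change earlier in this diff):

/* Rough, assumed sketch of ra_meta_pages(); the real helper is not in node.c. */
int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type)
{
	block_t blkno;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO,
	};

	for (blkno = start; blkno < start + nrpages; blkno++) {
		/* for META_POR the block number is used directly as the page index */
		struct page *page = grab_cache_page(META_MAPPING(sbi), blkno);

		if (!page)
			break;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}
		fio.blk_addr = blkno;
		f2fs_submit_page_mbio(sbi, page, &fio);
		f2fs_put_page(page, 0);
	}
	f2fs_submit_merged_bio(sbi, META, READ);
	return blkno - start;
}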

-		for (idx = 0; idx < nrpages; idx++) {
-			if (err)
-				goto skip;
+		for (idx = addr; idx < addr + nrpages; idx++) {
+			struct page *page = get_meta_page(sbi, idx);

-			lock_page(pages[idx]);
-			if (unlikely(!PageUptodate(pages[idx]))) {
-				err = -EIO;
-			} else {
-				rn = F2FS_NODE(pages[idx]);
-				sum_entry->nid = rn->footer.nid;
-				sum_entry->version = 0;
-				sum_entry->ofs_in_node = 0;
-				sum_entry++;
-			}
-			unlock_page(pages[idx]);
-skip:
-			page_cache_release(pages[idx]);
+			rn = F2FS_NODE(page);
+			sum_entry->nid = rn->footer.nid;
+			sum_entry->version = 0;
+			sum_entry->ofs_in_node = 0;
+			sum_entry++;
+			f2fs_put_page(page, 1);
 		}

-		invalidate_mapping_pages(inode->i_mapping, addr,
+		invalidate_mapping_pages(META_MAPPING(sbi), addr,
 					addr + nrpages);
 	}
-	return err;
+	return 0;
 }

 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
 	struct f2fs_summary_block *sum = curseg->sum_blk;
 	int i;
--- 107 unchanged lines hidden ---
 /*
  * This function is called during the checkpointing process.
  */
 void flush_nat_entries(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
 	struct f2fs_summary_block *sum = curseg->sum_blk;
-	struct nat_entry_set *setvec[NATVEC_SIZE];
+	struct nat_entry_set *setvec[SETVEC_SIZE];
 	struct nat_entry_set *set, *tmp;
 	unsigned int found;
 	nid_t set_idx = 0;
 	LIST_HEAD(sets);

 	if (!nm_i->dirty_nat_cnt)
 		return;
 	/*
 	 * if there are no enough space in journal to store dirty nat
 	 * entries, remove all entries from journal and merge them
 	 * into nat entry set.
 	 */
 	if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL))
 		remove_nats_in_journal(sbi);

 	while ((found = __gang_lookup_nat_set(nm_i,
-					set_idx, NATVEC_SIZE, setvec))) {
+					set_idx, SETVEC_SIZE, setvec))) {
 		unsigned idx;
 		set_idx = setvec[found - 1]->set + 1;
 		for (idx = 0; idx < found; idx++)
 			__adjust_nat_entry_set(setvec[idx], &sets,
 						MAX_NAT_JENTRIES(sum));
 	}

 	/* flush dirty nats in nat entry set */
--- 63 unchanged lines hidden ---
 	return 0;
 }

 void destroy_node_manager(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *i, *next_i;
 	struct nat_entry *natvec[NATVEC_SIZE];
+	struct nat_entry_set *setvec[SETVEC_SIZE];
 	nid_t nid = 0;
 	unsigned int found;

 	if (!nm_i)
 		return;

 	/* destroy free nid list */
 	spin_lock(&nm_i->free_nid_list_lock);
--- 8 unchanged lines hidden ---
 	f2fs_bug_on(sbi, nm_i->fcnt);
 	spin_unlock(&nm_i->free_nid_list_lock);

 	/* destroy nat cache */
 	down_write(&nm_i->nat_tree_lock);
 	while ((found = __gang_lookup_nat_cache(nm_i,
 					nid, NATVEC_SIZE, natvec))) {
 		unsigned idx;
+
 		nid = nat_get_nid(natvec[found - 1]) + 1;
 		for (idx = 0; idx < found; idx++)
 			__del_from_nat_cache(nm_i, natvec[idx]);
 	}
 	f2fs_bug_on(sbi, nm_i->nat_cnt);
+
+	/* destroy nat set cache */
+	nid = 0;
+	while ((found = __gang_lookup_nat_set(nm_i,
+					nid, SETVEC_SIZE, setvec))) {
+		unsigned idx;
+
+		nid = setvec[found - 1]->set + 1;
+		for (idx = 0; idx < found; idx++) {
+			/* entry_cnt is not zero, when cp_error was occurred */
+			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
+			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
+			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
+		}
+	}
 	up_write(&nm_i->nat_tree_lock);

 	kfree(nm_i->nat_bitmap);
 	sbi->nm_info = NULL;
 	kfree(nm_i);
 }

 int __init create_node_manager_caches(void)
--- 31 unchanged lines hidden ---