/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include <trace/events/f2fs.h>

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi)
{
	/*
	 * We should do GC or end up with a checkpoint, if there are too many
	 * dirty dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi);
	}
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* check the # of cached NAT entries and prefree segments */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
				excess_prefree_segs(sbi))
		f2fs_sync_fs(sbi->sb, true);
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
			clear_bit(GET_SECNO(sbi, segno),
						dirty_i->victim_secmap);
	}
}

/*
 * Errors such as -ENOMEM should not occur here.
 * Adding a dirty entry into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, 0);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

/*
 * Should call clear_prefree_segments after checkpoint is done.
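 * Prefree segments still hold blocks that are valid in the last checkpoint,
 * so they must not be reused until the new checkpoint has been written.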
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = -1;
	unsigned int total_segs = TOTAL_SEGS(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	while (1) {
		segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
				segno + 1);
		if (segno >= total_segs)
			break;
		__set_test_and_free(sbi, segno);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int total_segs = TOTAL_SEGS(sbi);
	unsigned int start = 0, end = -1;

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;
		start = find_next_bit(prefree_map, total_segs, end + 1);
		if (start >= total_segs)
			break;
		end = find_next_zero_bit(prefree_map, total_segs, start + 1);

		for (i = start; i < end; i++)
			clear_bit(i, prefree_map);

		dirty_i->nr_dirty[PRE] -= end - start;

		if (!test_opt(sbi, DISCARD))
			continue;

		blkdev_issue_discard(sbi->sb->s_bdev,
				START_BLOCK(sbi, start) <<
				sbi->log_sectors_per_block,
				(1 << (sbi->log_sectors_per_block +
				sbi->log_blocks_per_seg)) * (end - start),
				GFP_NOFS, 0);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
		sit_i->dirty_sentries++;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);
	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1);

	f2fs_bug_on((new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		if (f2fs_set_bit(offset, se->cur_valid_map))
			BUG();
	} else {
		if (!f2fs_clear_bit(offset, se->cur_valid_map))
			BUG();
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}

static void refresh_sit_entry(struct f2fs_sb_info *sbi,
			block_t old_blkaddr, block_t new_blkaddr)
{
	update_sit_entry(sbi, new_blkaddr, 1);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		update_sit_entry(sbi, old_blkaddr, -1);
}

void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	f2fs_bug_on(addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	mutex_lock(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	mutex_unlock(&sit_i->sentry_lock);
}

/*
 * This function should be called with the curseg_mutex lock held.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;
	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else
			valid_sum_count += curseg_blkoff(sbi, i);
	}

	sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
	if (valid_sum_count <= sum_in_page)
		return 1;
	else if ((valid_sum_count - sum_in_page) <=
		(PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
		return 2;
	return 3;
}

/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *kaddr = page_address(page);
	memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}

/*
 * Find a new segment from the free segments bitmap in the right order.
 * This function must succeed; otherwise it is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = *newseg / sbi->segs_per_sec;
	unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	write_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
					TOTAL_SEGS(sbi), *newseg + 1);
		if (segno - *newseg < sbi->segs_per_sec -
					(*newseg % sbi->segs_per_sec))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
	if (secno >= TOTAL_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
							TOTAL_SECS(sbi), 0);
			f2fs_bug_on(secno >= TOTAL_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
							TOTAL_SECS(sbi), 0);
		f2fs_bug_on(left_start >= TOTAL_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	hint = secno;
	segno = secno * sbi->segs_per_sec;
	zoneno = secno / sbi->secs_per_zone;

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	f2fs_bug_on(test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	write_unlock(&free_i->segmap_lock);
}

static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}

static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	block_t ofs;
	for (ofs = start; ofs < sbi->blocks_per_seg; ofs++) {
		if (!f2fs_test_bit(ofs, se->ckpt_valid_map)
			&& !f2fs_test_bit(ofs, se->cur_valid_map))
			break;
	}
	seg->next_blkoff = ofs;
}

/*
 * If a segment is written in LFS manner, the next block offset is simply
 * obtained by increasing the current block offset. However, if a segment is
 * written in SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff.
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}

/*
 * This function always allocates a used segment (from the dirty seglist) in
 * SSR manner, so it should recover the existing segment information of
 * valid blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	if (reuse) {
		sum_page = get_sum_page(sbi, new_segno);
		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
		f2fs_put_page(sum_page, 1);
	}
}

static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;

	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
		return v_ops->get_victim(sbi,
				&(curseg)->next_segno, BG_GC, type, SSR);

	/* For data segments, let's do SSR more intensively */
	for (; type >= CURSEG_HOT_DATA; type--)
		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
						BG_GC, type, SSR))
			return 1;
	return 0;
}

/*
 * Flush out the current segment and replace it with a new segment.
 * This function must succeed; otherwise it is a BUG.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (force)
		new_curseg(sbi, type, true);
	else if (type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
		new_curseg(sbi, type, false);
	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, false);

	stat_inc_seg_type(sbi, curseg);
}

void allocate_new_segments(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg;
	unsigned int old_curseg;
	int i;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		curseg = CURSEG_I(sbi, i);
		old_curseg = curseg->segno;
		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
		locate_dirty_segment(sbi, old_curseg);
	}
}

static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};

static void f2fs_end_io_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct bio_private *p = bio->bi_private;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (!uptodate) {
			SetPageError(page);
			if (page->mapping)
				set_bit(AS_EIO, &page->mapping->flags);
			set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
			p->sbi->sb->s_flags |= MS_RDONLY;
		}
		end_page_writeback(page);
		dec_page_count(p->sbi, F2FS_WRITEBACK);
	} while (bvec >= bio->bi_io_vec);

	if (p->is_sync)
		complete(p->wait);

	if (!get_pages(p->sbi, F2FS_WRITEBACK) &&
			!list_empty(&p->sbi->cp_wait.task_list))
		wake_up(&p->sbi->cp_wait);

	kfree(p);
	bio_put(bio);
}

struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);
	bio->bi_bdev = bdev;
	bio->bi_private = NULL;

	return bio;
}

static void do_submit_bio(struct f2fs_sb_info *sbi,
			enum page_type type, bool sync)
{
	int rw = sync ? WRITE_SYNC : WRITE;
	enum page_type btype = type > META ? META : type;

	if (type >= META_FLUSH)
		rw = WRITE_FLUSH_FUA;

	if (btype == META)
		rw |= REQ_META;

	if (sbi->bio[btype]) {
		struct bio_private *p = sbi->bio[btype]->bi_private;
		p->sbi = sbi;
		sbi->bio[btype]->bi_end_io = f2fs_end_io_write;

		trace_f2fs_do_submit_bio(sbi->sb, btype, sync, sbi->bio[btype]);

		if (type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			p->is_sync = true;
			p->wait = &wait;
			submit_bio(rw, sbi->bio[btype]);
			wait_for_completion(&wait);
		} else {
			p->is_sync = false;
			submit_bio(rw, sbi->bio[btype]);
		}
		sbi->bio[btype] = NULL;
	}
}

void f2fs_submit_bio(struct f2fs_sb_info *sbi, enum page_type type, bool sync)
{
	down_write(&sbi->bio_sem);
	do_submit_bio(sbi, type, sync);
	up_write(&sbi->bio_sem);
}

static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
				block_t blk_addr, enum page_type type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int bio_blocks;

	verify_block_addr(sbi, blk_addr);

	down_write(&sbi->bio_sem);

	inc_page_count(sbi, F2FS_WRITEBACK);

	if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1)
		do_submit_bio(sbi, type, false);
alloc_new:
	if (sbi->bio[type] == NULL) {
		struct bio_private *priv;
retry:
		priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
		if (!priv) {
			cond_resched();
			goto retry;
		}

		bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
		sbi->bio[type] = f2fs_bio_alloc(bdev, bio_blocks);
		sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
		sbi->bio[type]->bi_private = priv;
		/*
		 * The end_io will be assigned at the submission phase.
		 * Until then, let bio_add_page() merge consecutive IOs as much
		 * as possible.
		 */
	}

	if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		do_submit_bio(sbi, type, false);
		goto alloc_new;
	}

	sbi->last_block_in_bio[type] = blk_addr;

	up_write(&sbi->bio_sem);
	trace_f2fs_submit_write_page(page, blk_addr, type);
}

void f2fs_wait_on_page_writeback(struct page *page,
				enum page_type type, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	if (PageWriteback(page)) {
		f2fs_submit_bio(sbi, type, sync);
		wait_on_page_writeback(page);
	}
}

static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	if (curseg->next_blkoff < sbi->blocks_per_seg)
		return true;
	return false;
}

static int __get_segment_type_2(struct page *page, enum page_type p_type)
{
	if (p_type == DATA)
		return CURSEG_HOT_DATA;
	else
		return CURSEG_HOT_NODE;
}

static int __get_segment_type_4(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else
			return CURSEG_COLD_DATA;
	} else {
		if (IS_DNODE(page) && !is_cold_node(page))
			return CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type_6(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else if (is_cold_data(page) || file_is_cold(inode))
			return CURSEG_COLD_DATA;
		else
			return CURSEG_WARM_DATA;
	} else {
		if (IS_DNODE(page))
			return is_cold_node(page) ? CURSEG_WARM_NODE :
						CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type(struct page *page, enum page_type p_type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	switch (sbi->active_logs) {
	case 2:
		return __get_segment_type_2(page, p_type);
	case 4:
		return __get_segment_type_4(page, p_type);
	}
	/* NR_CURSEG_TYPE(6) logs by default */
	f2fs_bug_on(sbi->active_logs != NR_CURSEG_TYPE);
	return __get_segment_type_6(page, p_type);
}

static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, enum page_type p_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int old_cursegno;
	int type;

	type = __get_segment_type(page, p_type);
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);

	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
	old_cursegno = curseg->segno;

	/*
	 * __add_sum_entry should be called under the curseg_mutex, because
	 * this function updates a summary entry in the current summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	mutex_lock(&sit_i->sentry_lock);
	__refresh_next_blkoff(sbi, curseg);

	stat_inc_block_count(sbi, curseg);

	/*
	 * SIT information should be updated before segment allocation,
	 * since SSR needs the latest valid block information.
	 */
	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

	if (!__has_curseg_space(sbi, type))
		sit_i->s_ops->allocate_segment(sbi, type, false);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
	mutex_unlock(&sit_i->sentry_lock);

	if (p_type == NODE)
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

	/* writeout dirty page into bdev */
	submit_write_page(sbi, page, *new_blkaddr, p_type);

	mutex_unlock(&curseg->curseg_mutex);
}

void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
	set_page_writeback(page);
	submit_write_page(sbi, page, page->index, META);
}

void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
		unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
{
	struct f2fs_summary sum;
	set_summary(&sum, nid, 0, 0);
	do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, NODE);
}

void write_data_page(struct inode *inode, struct page *page,
		struct dnode_of_data *dn, block_t old_blkaddr,
		block_t *new_blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_summary sum;
	struct node_info ni;

	f2fs_bug_on(old_blkaddr == NULL_ADDR);
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	do_write_page(sbi, page, old_blkaddr,
			new_blkaddr, &sum, DATA);
}

void rewrite_data_page(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blk_addr)
{
	submit_write_page(sbi, page, old_blk_addr, DATA);
}

void recover_data_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	struct seg_entry *se;
	int type;

	segno = GET_SEGNO(sbi, new_blkaddr);
	se = get_seg_entry(sbi, segno);
	type = se->type;

	if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
		if (old_blkaddr == NULL_ADDR)
			type = CURSEG_COLD_DATA;
		else
			type = CURSEG_WARM_DATA;
	}
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}

	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
					(sbi->blocks_per_seg - 1);
	__add_sum_entry(sbi, type, sum);

	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

void rewrite_node_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int type = CURSEG_WARM_NODE;
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	block_t next_blkaddr = next_blkaddr_of_node(page);
	unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	segno = GET_SEGNO(sbi, new_blkaddr);
	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}
	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
					(sbi->blocks_per_seg - 1);
	__add_sum_entry(sbi, type, sum);

	/* change the current log to the next block addr in advance */
	if (next_segno != segno) {
		curseg->next_segno = next_segno;
		change_curseg(sbi, type, true);
	}
	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) &
					(sbi->blocks_per_seg - 1);

	/* rewrite node page */
	set_page_writeback(page);
	submit_write_page(sbi, page, new_blkaddr, NODE);
	f2fs_submit_bio(sbi, NODE, true);
	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *seg_i;
	unsigned char *kaddr;
	struct page *page;
	block_t start;
	int i, j, offset;

	start = start_sum_block(sbi);

	page = get_meta_page(sbi, start++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: restore nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);

	/* Step 2: restore sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
						SUM_JOURNAL_SIZE);
	offset = 2 * SUM_JOURNAL_SIZE;

	/* Step 3: restore summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		unsigned int segno;

		seg_i = CURSEG_I(sbi, i);
		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
		seg_i->next_segno = segno;
		reset_curseg(sbi, i, 0);
		seg_i->alloc_type = ckpt->alloc_type[i];
		seg_i->next_blkoff = blk_off;

		if (seg_i->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;
			s = (struct f2fs_summary *)(kaddr + offset);
			seg_i->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
						SUM_FOOTER_SIZE)
				continue;

			f2fs_put_page(page, 1);
			page = NULL;

			page = get_meta_page(sbi, start++);
			kaddr = (unsigned char *)page_address(page);
			offset = 0;
		}
	}
	f2fs_put_page(page, 1);
	return 0;
}

static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum;
	struct curseg_info *curseg;
	struct page *new;
	unsigned short blk_off;
	unsigned int segno = 0;
	block_t blk_addr = 0;

	/* get segment number and block addr */
	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
							CURSEG_HOT_DATA]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
							CURSEG_HOT_NODE]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLOCK(sbi, segno);
	}

	new = get_meta_page(sbi, blk_addr);
	sum = (struct f2fs_summary_block *)page_address(new);

	if (IS_NODESEG(type)) {
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
			struct f2fs_summary *ns = &sum->entries[0];
			int i;
			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
				ns->version = 0;
				ns->ofs_in_node = 0;
			}
		} else {
			if (restore_node_summary(sbi, segno, sum)) {
				f2fs_put_page(new, 1);
				return -EINVAL;
			}
		}
	}

	/* set uncompleted segment to curseg */
	curseg = CURSEG_I(sbi, type);
	mutex_lock(&curseg->curseg_mutex);
	memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 0);
	curseg->alloc_type = ckpt->alloc_type[type];
	curseg->next_blkoff = blk_off;
	mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(new, 1);
	return 0;
}

static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;

	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
		/* restore for compacted data summary */
		if (read_compacted_summaries(sbi))
			return -EINVAL;
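		/* compacted summaries cover the data logs; continue from the node logs */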
		type = CURSEG_HOT_NODE;
	}

	for (; type <= CURSEG_COLD_NODE; type++)
		if (read_normal_summaries(sbi, type))
			return -EINVAL;
	return 0;
}

static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
						SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blkoff;
		seg_i = CURSEG_I(sbi, i);
		if (sbi->ckpt->alloc_type[i] == SSR)
			blkoff = sbi->blocks_per_seg;
		else
			blkoff = curseg_blkoff(sbi, i);

		for (j = 0; j < blkoff; j++) {
			if (!page) {
				page = grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;

			if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
							SUM_FOOTER_SIZE)
				continue;

			set_page_dirty(page);
			f2fs_put_page(page, 1);
			page = NULL;
		}
	}
	if (page) {
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;
	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

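	/* write one summary block for each current segment in this group */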
1174351df4b2SJaegeuk Kim for (i = type; i < end; i++) { 1175351df4b2SJaegeuk Kim struct curseg_info *sum = CURSEG_I(sbi, i); 1176351df4b2SJaegeuk Kim mutex_lock(&sum->curseg_mutex); 1177351df4b2SJaegeuk Kim write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type)); 1178351df4b2SJaegeuk Kim mutex_unlock(&sum->curseg_mutex); 1179351df4b2SJaegeuk Kim } 1180351df4b2SJaegeuk Kim } 1181351df4b2SJaegeuk Kim 1182351df4b2SJaegeuk Kim void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk) 1183351df4b2SJaegeuk Kim { 118425ca923bSJaegeuk Kim if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) 1185351df4b2SJaegeuk Kim write_compacted_summaries(sbi, start_blk); 1186351df4b2SJaegeuk Kim else 1187351df4b2SJaegeuk Kim write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA); 1188351df4b2SJaegeuk Kim } 1189351df4b2SJaegeuk Kim 1190351df4b2SJaegeuk Kim void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk) 1191351df4b2SJaegeuk Kim { 119225ca923bSJaegeuk Kim if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) 1193351df4b2SJaegeuk Kim write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE); 1194351df4b2SJaegeuk Kim } 1195351df4b2SJaegeuk Kim 1196351df4b2SJaegeuk Kim int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type, 1197351df4b2SJaegeuk Kim unsigned int val, int alloc) 1198351df4b2SJaegeuk Kim { 1199351df4b2SJaegeuk Kim int i; 1200351df4b2SJaegeuk Kim 1201351df4b2SJaegeuk Kim if (type == NAT_JOURNAL) { 1202351df4b2SJaegeuk Kim for (i = 0; i < nats_in_cursum(sum); i++) { 1203351df4b2SJaegeuk Kim if (le32_to_cpu(nid_in_journal(sum, i)) == val) 1204351df4b2SJaegeuk Kim return i; 1205351df4b2SJaegeuk Kim } 1206351df4b2SJaegeuk Kim if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) 1207351df4b2SJaegeuk Kim return update_nats_in_cursum(sum, 1); 1208351df4b2SJaegeuk Kim } else if (type == SIT_JOURNAL) { 1209351df4b2SJaegeuk Kim for (i = 0; i < sits_in_cursum(sum); i++) 1210351df4b2SJaegeuk Kim if (le32_to_cpu(segno_in_journal(sum, i)) == val) 1211351df4b2SJaegeuk Kim return i; 1212351df4b2SJaegeuk Kim if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES) 1213351df4b2SJaegeuk Kim return update_sits_in_cursum(sum, 1); 1214351df4b2SJaegeuk Kim } 1215351df4b2SJaegeuk Kim return -1; 1216351df4b2SJaegeuk Kim } 1217351df4b2SJaegeuk Kim 1218351df4b2SJaegeuk Kim static struct page *get_current_sit_page(struct f2fs_sb_info *sbi, 1219351df4b2SJaegeuk Kim unsigned int segno) 1220351df4b2SJaegeuk Kim { 1221351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 1222351df4b2SJaegeuk Kim unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno); 1223351df4b2SJaegeuk Kim block_t blk_addr = sit_i->sit_base_addr + offset; 1224351df4b2SJaegeuk Kim 1225351df4b2SJaegeuk Kim check_seg_range(sbi, segno); 1226351df4b2SJaegeuk Kim 1227351df4b2SJaegeuk Kim /* calculate sit block address */ 1228351df4b2SJaegeuk Kim if (f2fs_test_bit(offset, sit_i->sit_bitmap)) 1229351df4b2SJaegeuk Kim blk_addr += sit_i->sit_blocks; 1230351df4b2SJaegeuk Kim 1231351df4b2SJaegeuk Kim return get_meta_page(sbi, blk_addr); 1232351df4b2SJaegeuk Kim } 1233351df4b2SJaegeuk Kim 1234351df4b2SJaegeuk Kim static struct page *get_next_sit_page(struct f2fs_sb_info *sbi, 1235351df4b2SJaegeuk Kim unsigned int start) 1236351df4b2SJaegeuk Kim { 1237351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 1238351df4b2SJaegeuk Kim struct page *src_page, *dst_page; 1239351df4b2SJaegeuk Kim pgoff_t src_off, dst_off; 1240351df4b2SJaegeuk Kim void *src_addr, *dst_addr; 1241351df4b2SJaegeuk Kim 1242351df4b2SJaegeuk Kim 
src_off = current_sit_addr(sbi, start); 1243351df4b2SJaegeuk Kim dst_off = next_sit_addr(sbi, src_off); 1244351df4b2SJaegeuk Kim 1245351df4b2SJaegeuk Kim /* get current sit block page without lock */ 1246351df4b2SJaegeuk Kim src_page = get_meta_page(sbi, src_off); 1247351df4b2SJaegeuk Kim dst_page = grab_meta_page(sbi, dst_off); 12485d56b671SJaegeuk Kim f2fs_bug_on(PageDirty(src_page)); 1249351df4b2SJaegeuk Kim 1250351df4b2SJaegeuk Kim src_addr = page_address(src_page); 1251351df4b2SJaegeuk Kim dst_addr = page_address(dst_page); 1252351df4b2SJaegeuk Kim memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE); 1253351df4b2SJaegeuk Kim 1254351df4b2SJaegeuk Kim set_page_dirty(dst_page); 1255351df4b2SJaegeuk Kim f2fs_put_page(src_page, 1); 1256351df4b2SJaegeuk Kim 1257351df4b2SJaegeuk Kim set_to_next_sit(sit_i, start); 1258351df4b2SJaegeuk Kim 1259351df4b2SJaegeuk Kim return dst_page; 1260351df4b2SJaegeuk Kim } 1261351df4b2SJaegeuk Kim 1262351df4b2SJaegeuk Kim static bool flush_sits_in_journal(struct f2fs_sb_info *sbi) 1263351df4b2SJaegeuk Kim { 1264351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 1265351df4b2SJaegeuk Kim struct f2fs_summary_block *sum = curseg->sum_blk; 1266351df4b2SJaegeuk Kim int i; 1267351df4b2SJaegeuk Kim 1268351df4b2SJaegeuk Kim /* 1269351df4b2SJaegeuk Kim * If the journal area in the current summary is full of sit entries, 1270351df4b2SJaegeuk Kim * all the sit entries will be flushed. Otherwise the sit entries 1271351df4b2SJaegeuk Kim * are not able to replace with newly hot sit entries. 1272351df4b2SJaegeuk Kim */ 1273351df4b2SJaegeuk Kim if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) { 1274351df4b2SJaegeuk Kim for (i = sits_in_cursum(sum) - 1; i >= 0; i--) { 1275351df4b2SJaegeuk Kim unsigned int segno; 1276351df4b2SJaegeuk Kim segno = le32_to_cpu(segno_in_journal(sum, i)); 1277351df4b2SJaegeuk Kim __mark_sit_entry_dirty(sbi, segno); 1278351df4b2SJaegeuk Kim } 1279351df4b2SJaegeuk Kim update_sits_in_cursum(sum, -sits_in_cursum(sum)); 1280cffbfa66SHaicheng Li return true; 1281351df4b2SJaegeuk Kim } 1282cffbfa66SHaicheng Li return false; 1283351df4b2SJaegeuk Kim } 1284351df4b2SJaegeuk Kim 12850a8165d7SJaegeuk Kim /* 1286351df4b2SJaegeuk Kim * CP calls this function, which flushes SIT entries including sit_journal, 1287351df4b2SJaegeuk Kim * and moves prefree segs to free segs. 1288351df4b2SJaegeuk Kim */ 1289351df4b2SJaegeuk Kim void flush_sit_entries(struct f2fs_sb_info *sbi) 1290351df4b2SJaegeuk Kim { 1291351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 1292351df4b2SJaegeuk Kim unsigned long *bitmap = sit_i->dirty_sentries_bitmap; 1293351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 1294351df4b2SJaegeuk Kim struct f2fs_summary_block *sum = curseg->sum_blk; 1295351df4b2SJaegeuk Kim unsigned long nsegs = TOTAL_SEGS(sbi); 1296351df4b2SJaegeuk Kim struct page *page = NULL; 1297351df4b2SJaegeuk Kim struct f2fs_sit_block *raw_sit = NULL; 1298351df4b2SJaegeuk Kim unsigned int start = 0, end = 0; 1299351df4b2SJaegeuk Kim unsigned int segno = -1; 1300351df4b2SJaegeuk Kim bool flushed; 1301351df4b2SJaegeuk Kim 1302351df4b2SJaegeuk Kim mutex_lock(&curseg->curseg_mutex); 1303351df4b2SJaegeuk Kim mutex_lock(&sit_i->sentry_lock); 1304351df4b2SJaegeuk Kim 1305351df4b2SJaegeuk Kim /* 1306351df4b2SJaegeuk Kim * "flushed" indicates whether sit entries in journal are flushed 1307351df4b2SJaegeuk Kim * to the SIT area or not. 
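 * When they were flushed above, every dirty entry below is written straight
 * to its on-disk SIT block; otherwise an entry first tries to take a slot in
 * the SIT journal and only falls back to the SIT block once the journal is
 * full.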
1308351df4b2SJaegeuk Kim */
1309351df4b2SJaegeuk Kim flushed = flush_sits_in_journal(sbi);
1310351df4b2SJaegeuk Kim
1311351df4b2SJaegeuk Kim while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) {
1312351df4b2SJaegeuk Kim struct seg_entry *se = get_seg_entry(sbi, segno);
1313351df4b2SJaegeuk Kim int sit_offset, offset;
1314351df4b2SJaegeuk Kim
1315351df4b2SJaegeuk Kim sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
1316351df4b2SJaegeuk Kim
1317351df4b2SJaegeuk Kim if (flushed)
1318351df4b2SJaegeuk Kim goto to_sit_page;
1319351df4b2SJaegeuk Kim
1320351df4b2SJaegeuk Kim offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
1321351df4b2SJaegeuk Kim if (offset >= 0) {
1322351df4b2SJaegeuk Kim segno_in_journal(sum, offset) = cpu_to_le32(segno);
1323351df4b2SJaegeuk Kim seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
1324351df4b2SJaegeuk Kim goto flush_done;
1325351df4b2SJaegeuk Kim }
1326351df4b2SJaegeuk Kim to_sit_page:
1327351df4b2SJaegeuk Kim if (!page || (start > segno) || (segno > end)) {
1328351df4b2SJaegeuk Kim if (page) {
1329351df4b2SJaegeuk Kim f2fs_put_page(page, 1);
1330351df4b2SJaegeuk Kim page = NULL;
1331351df4b2SJaegeuk Kim }
1332351df4b2SJaegeuk Kim
1333351df4b2SJaegeuk Kim start = START_SEGNO(sit_i, segno);
1334351df4b2SJaegeuk Kim end = start + SIT_ENTRY_PER_BLOCK - 1;
1335351df4b2SJaegeuk Kim
1336351df4b2SJaegeuk Kim /* read sit block that will be updated */
1337351df4b2SJaegeuk Kim page = get_next_sit_page(sbi, start);
1338351df4b2SJaegeuk Kim raw_sit = page_address(page);
1339351df4b2SJaegeuk Kim }
1340351df4b2SJaegeuk Kim
1341351df4b2SJaegeuk Kim /* update entry in SIT block */
1342351df4b2SJaegeuk Kim seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
1343351df4b2SJaegeuk Kim flush_done:
1344351df4b2SJaegeuk Kim __clear_bit(segno, bitmap);
1345351df4b2SJaegeuk Kim sit_i->dirty_sentries--;
1346351df4b2SJaegeuk Kim }
1347351df4b2SJaegeuk Kim mutex_unlock(&sit_i->sentry_lock);
1348351df4b2SJaegeuk Kim mutex_unlock(&curseg->curseg_mutex);
1349351df4b2SJaegeuk Kim
1350351df4b2SJaegeuk Kim /* writeout last modified SIT block */
1351351df4b2SJaegeuk Kim f2fs_put_page(page, 1);
1352351df4b2SJaegeuk Kim
1353351df4b2SJaegeuk Kim set_prefree_as_free_segments(sbi);
1354351df4b2SJaegeuk Kim }
1355351df4b2SJaegeuk Kim
1356351df4b2SJaegeuk Kim static int build_sit_info(struct f2fs_sb_info *sbi)
1357351df4b2SJaegeuk Kim {
1358351df4b2SJaegeuk Kim struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1359351df4b2SJaegeuk Kim struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1360351df4b2SJaegeuk Kim struct sit_info *sit_i;
1361351df4b2SJaegeuk Kim unsigned int sit_segs, start;
1362351df4b2SJaegeuk Kim char *src_bitmap, *dst_bitmap;
1363351df4b2SJaegeuk Kim unsigned int bitmap_size;
1364351df4b2SJaegeuk Kim
1365351df4b2SJaegeuk Kim /* allocate memory for SIT information */
1366351df4b2SJaegeuk Kim sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
1367351df4b2SJaegeuk Kim if (!sit_i)
1368351df4b2SJaegeuk Kim return -ENOMEM;
1369351df4b2SJaegeuk Kim
1370351df4b2SJaegeuk Kim SM_I(sbi)->sit_info = sit_i;
1371351df4b2SJaegeuk Kim
1372351df4b2SJaegeuk Kim sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
1373351df4b2SJaegeuk Kim if (!sit_i->sentries)
1374351df4b2SJaegeuk Kim return -ENOMEM;
1375351df4b2SJaegeuk Kim
1376351df4b2SJaegeuk Kim bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1377351df4b2SJaegeuk Kim sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
1378351df4b2SJaegeuk Kim if (!sit_i->dirty_sentries_bitmap)
1379351df4b2SJaegeuk Kim return -ENOMEM;
1380351df4b2SJaegeuk Kim
1381351df4b2SJaegeuk Kim for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1382351df4b2SJaegeuk Kim sit_i->sentries[start].cur_valid_map
1383351df4b2SJaegeuk Kim = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1384351df4b2SJaegeuk Kim sit_i->sentries[start].ckpt_valid_map
1385351df4b2SJaegeuk Kim = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1386351df4b2SJaegeuk Kim if (!sit_i->sentries[start].cur_valid_map
1387351df4b2SJaegeuk Kim || !sit_i->sentries[start].ckpt_valid_map)
1388351df4b2SJaegeuk Kim return -ENOMEM;
1389351df4b2SJaegeuk Kim }
1390351df4b2SJaegeuk Kim
1391351df4b2SJaegeuk Kim if (sbi->segs_per_sec > 1) {
139253cf9522SJaegeuk Kim sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
1393351df4b2SJaegeuk Kim sizeof(struct sec_entry));
1394351df4b2SJaegeuk Kim if (!sit_i->sec_entries)
1395351df4b2SJaegeuk Kim return -ENOMEM;
1396351df4b2SJaegeuk Kim }
1397351df4b2SJaegeuk Kim
1398351df4b2SJaegeuk Kim /* get information related to SIT */
1399351df4b2SJaegeuk Kim sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
1400351df4b2SJaegeuk Kim
1401351df4b2SJaegeuk Kim /* setup SIT bitmap from checkpoint pack */
1402351df4b2SJaegeuk Kim bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
1403351df4b2SJaegeuk Kim src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
1404351df4b2SJaegeuk Kim
140579b5793bSAlexandru Gheorghiu dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
1406351df4b2SJaegeuk Kim if (!dst_bitmap)
1407351df4b2SJaegeuk Kim return -ENOMEM;
1408351df4b2SJaegeuk Kim
1409351df4b2SJaegeuk Kim /* init SIT information */
1410351df4b2SJaegeuk Kim sit_i->s_ops = &default_salloc_ops;
1411351df4b2SJaegeuk Kim
1412351df4b2SJaegeuk Kim sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
1413351df4b2SJaegeuk Kim sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
1414351df4b2SJaegeuk Kim sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
1415351df4b2SJaegeuk Kim sit_i->sit_bitmap = dst_bitmap;
1416351df4b2SJaegeuk Kim sit_i->bitmap_size = bitmap_size;
1417351df4b2SJaegeuk Kim sit_i->dirty_sentries = 0;
1418351df4b2SJaegeuk Kim sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
1419351df4b2SJaegeuk Kim sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
1420351df4b2SJaegeuk Kim sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
1421351df4b2SJaegeuk Kim mutex_init(&sit_i->sentry_lock);
1422351df4b2SJaegeuk Kim return 0;
1423351df4b2SJaegeuk Kim }
1424351df4b2SJaegeuk Kim
1425351df4b2SJaegeuk Kim static int build_free_segmap(struct f2fs_sb_info *sbi)
1426351df4b2SJaegeuk Kim {
1427351df4b2SJaegeuk Kim struct f2fs_sm_info *sm_info = SM_I(sbi);
1428351df4b2SJaegeuk Kim struct free_segmap_info *free_i;
1429351df4b2SJaegeuk Kim unsigned int bitmap_size, sec_bitmap_size;
1430351df4b2SJaegeuk Kim
1431351df4b2SJaegeuk Kim /* allocate memory for free segmap information */
1432351df4b2SJaegeuk Kim free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
1433351df4b2SJaegeuk Kim if (!free_i)
1434351df4b2SJaegeuk Kim return -ENOMEM;
1435351df4b2SJaegeuk Kim
1436351df4b2SJaegeuk Kim SM_I(sbi)->free_info = free_i;
1437351df4b2SJaegeuk Kim
1438351df4b2SJaegeuk Kim bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1439351df4b2SJaegeuk Kim free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
1440351df4b2SJaegeuk Kim if (!free_i->free_segmap)
1441351df4b2SJaegeuk Kim return -ENOMEM;
1442351df4b2SJaegeuk Kim
144353cf9522SJaegeuk Kim sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
1444351df4b2SJaegeuk Kim
free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL); 1445351df4b2SJaegeuk Kim if (!free_i->free_secmap) 1446351df4b2SJaegeuk Kim return -ENOMEM; 1447351df4b2SJaegeuk Kim 1448351df4b2SJaegeuk Kim /* set all segments as dirty temporarily */ 1449351df4b2SJaegeuk Kim memset(free_i->free_segmap, 0xff, bitmap_size); 1450351df4b2SJaegeuk Kim memset(free_i->free_secmap, 0xff, sec_bitmap_size); 1451351df4b2SJaegeuk Kim 1452351df4b2SJaegeuk Kim /* init free segmap information */ 1453351df4b2SJaegeuk Kim free_i->start_segno = 1454351df4b2SJaegeuk Kim (unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr); 1455351df4b2SJaegeuk Kim free_i->free_segments = 0; 1456351df4b2SJaegeuk Kim free_i->free_sections = 0; 1457351df4b2SJaegeuk Kim rwlock_init(&free_i->segmap_lock); 1458351df4b2SJaegeuk Kim return 0; 1459351df4b2SJaegeuk Kim } 1460351df4b2SJaegeuk Kim 1461351df4b2SJaegeuk Kim static int build_curseg(struct f2fs_sb_info *sbi) 1462351df4b2SJaegeuk Kim { 14631042d60fSNamjae Jeon struct curseg_info *array; 1464351df4b2SJaegeuk Kim int i; 1465351df4b2SJaegeuk Kim 1466351df4b2SJaegeuk Kim array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL); 1467351df4b2SJaegeuk Kim if (!array) 1468351df4b2SJaegeuk Kim return -ENOMEM; 1469351df4b2SJaegeuk Kim 1470351df4b2SJaegeuk Kim SM_I(sbi)->curseg_array = array; 1471351df4b2SJaegeuk Kim 1472351df4b2SJaegeuk Kim for (i = 0; i < NR_CURSEG_TYPE; i++) { 1473351df4b2SJaegeuk Kim mutex_init(&array[i].curseg_mutex); 1474351df4b2SJaegeuk Kim array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); 1475351df4b2SJaegeuk Kim if (!array[i].sum_blk) 1476351df4b2SJaegeuk Kim return -ENOMEM; 1477351df4b2SJaegeuk Kim array[i].segno = NULL_SEGNO; 1478351df4b2SJaegeuk Kim array[i].next_blkoff = 0; 1479351df4b2SJaegeuk Kim } 1480351df4b2SJaegeuk Kim return restore_curseg_summaries(sbi); 1481351df4b2SJaegeuk Kim } 1482351df4b2SJaegeuk Kim 1483351df4b2SJaegeuk Kim static void build_sit_entries(struct f2fs_sb_info *sbi) 1484351df4b2SJaegeuk Kim { 1485351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 1486351df4b2SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 1487351df4b2SJaegeuk Kim struct f2fs_summary_block *sum = curseg->sum_blk; 1488351df4b2SJaegeuk Kim unsigned int start; 1489351df4b2SJaegeuk Kim 1490351df4b2SJaegeuk Kim for (start = 0; start < TOTAL_SEGS(sbi); start++) { 1491351df4b2SJaegeuk Kim struct seg_entry *se = &sit_i->sentries[start]; 1492351df4b2SJaegeuk Kim struct f2fs_sit_block *sit_blk; 1493351df4b2SJaegeuk Kim struct f2fs_sit_entry sit; 1494351df4b2SJaegeuk Kim struct page *page; 1495351df4b2SJaegeuk Kim int i; 1496351df4b2SJaegeuk Kim 1497351df4b2SJaegeuk Kim mutex_lock(&curseg->curseg_mutex); 1498351df4b2SJaegeuk Kim for (i = 0; i < sits_in_cursum(sum); i++) { 1499351df4b2SJaegeuk Kim if (le32_to_cpu(segno_in_journal(sum, i)) == start) { 1500351df4b2SJaegeuk Kim sit = sit_in_journal(sum, i); 1501351df4b2SJaegeuk Kim mutex_unlock(&curseg->curseg_mutex); 1502351df4b2SJaegeuk Kim goto got_it; 1503351df4b2SJaegeuk Kim } 1504351df4b2SJaegeuk Kim } 1505351df4b2SJaegeuk Kim mutex_unlock(&curseg->curseg_mutex); 1506351df4b2SJaegeuk Kim page = get_current_sit_page(sbi, start); 1507351df4b2SJaegeuk Kim sit_blk = (struct f2fs_sit_block *)page_address(page); 1508351df4b2SJaegeuk Kim sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)]; 1509351df4b2SJaegeuk Kim f2fs_put_page(page, 1); 1510351df4b2SJaegeuk Kim got_it: 1511351df4b2SJaegeuk Kim check_block_count(sbi, start, &sit); 1512351df4b2SJaegeuk Kim 
seg_info_from_raw_sit(se, &sit); 1513351df4b2SJaegeuk Kim if (sbi->segs_per_sec > 1) { 1514351df4b2SJaegeuk Kim struct sec_entry *e = get_sec_entry(sbi, start); 1515351df4b2SJaegeuk Kim e->valid_blocks += se->valid_blocks; 1516351df4b2SJaegeuk Kim } 1517351df4b2SJaegeuk Kim } 1518351df4b2SJaegeuk Kim } 1519351df4b2SJaegeuk Kim 1520351df4b2SJaegeuk Kim static void init_free_segmap(struct f2fs_sb_info *sbi) 1521351df4b2SJaegeuk Kim { 1522351df4b2SJaegeuk Kim unsigned int start; 1523351df4b2SJaegeuk Kim int type; 1524351df4b2SJaegeuk Kim 1525351df4b2SJaegeuk Kim for (start = 0; start < TOTAL_SEGS(sbi); start++) { 1526351df4b2SJaegeuk Kim struct seg_entry *sentry = get_seg_entry(sbi, start); 1527351df4b2SJaegeuk Kim if (!sentry->valid_blocks) 1528351df4b2SJaegeuk Kim __set_free(sbi, start); 1529351df4b2SJaegeuk Kim } 1530351df4b2SJaegeuk Kim 1531351df4b2SJaegeuk Kim /* set use the current segments */ 1532351df4b2SJaegeuk Kim for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) { 1533351df4b2SJaegeuk Kim struct curseg_info *curseg_t = CURSEG_I(sbi, type); 1534351df4b2SJaegeuk Kim __set_test_and_inuse(sbi, curseg_t->segno); 1535351df4b2SJaegeuk Kim } 1536351df4b2SJaegeuk Kim } 1537351df4b2SJaegeuk Kim 1538351df4b2SJaegeuk Kim static void init_dirty_segmap(struct f2fs_sb_info *sbi) 1539351df4b2SJaegeuk Kim { 1540351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 1541351df4b2SJaegeuk Kim struct free_segmap_info *free_i = FREE_I(sbi); 15428736fbf0SNamjae Jeon unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi); 1543351df4b2SJaegeuk Kim unsigned short valid_blocks; 1544351df4b2SJaegeuk Kim 15458736fbf0SNamjae Jeon while (1) { 1546351df4b2SJaegeuk Kim /* find dirty segment based on free segmap */ 15478736fbf0SNamjae Jeon segno = find_next_inuse(free_i, total_segs, offset); 15488736fbf0SNamjae Jeon if (segno >= total_segs) 1549351df4b2SJaegeuk Kim break; 1550351df4b2SJaegeuk Kim offset = segno + 1; 1551351df4b2SJaegeuk Kim valid_blocks = get_valid_blocks(sbi, segno, 0); 1552351df4b2SJaegeuk Kim if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks) 1553351df4b2SJaegeuk Kim continue; 1554351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock); 1555351df4b2SJaegeuk Kim __locate_dirty_segment(sbi, segno, DIRTY); 1556351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock); 1557351df4b2SJaegeuk Kim } 1558351df4b2SJaegeuk Kim } 1559351df4b2SJaegeuk Kim 15605ec4e49fSJaegeuk Kim static int init_victim_secmap(struct f2fs_sb_info *sbi) 1561351df4b2SJaegeuk Kim { 1562351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 15635ec4e49fSJaegeuk Kim unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi)); 1564351df4b2SJaegeuk Kim 15655ec4e49fSJaegeuk Kim dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL); 15665ec4e49fSJaegeuk Kim if (!dirty_i->victim_secmap) 1567351df4b2SJaegeuk Kim return -ENOMEM; 1568351df4b2SJaegeuk Kim return 0; 1569351df4b2SJaegeuk Kim } 1570351df4b2SJaegeuk Kim 1571351df4b2SJaegeuk Kim static int build_dirty_segmap(struct f2fs_sb_info *sbi) 1572351df4b2SJaegeuk Kim { 1573351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i; 1574351df4b2SJaegeuk Kim unsigned int bitmap_size, i; 1575351df4b2SJaegeuk Kim 1576351df4b2SJaegeuk Kim /* allocate memory for dirty segments list information */ 1577351df4b2SJaegeuk Kim dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL); 1578351df4b2SJaegeuk Kim if (!dirty_i) 1579351df4b2SJaegeuk Kim return -ENOMEM; 1580351df4b2SJaegeuk Kim 1581351df4b2SJaegeuk Kim 
SM_I(sbi)->dirty_info = dirty_i; 1582351df4b2SJaegeuk Kim mutex_init(&dirty_i->seglist_lock); 1583351df4b2SJaegeuk Kim 1584351df4b2SJaegeuk Kim bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi)); 1585351df4b2SJaegeuk Kim 1586351df4b2SJaegeuk Kim for (i = 0; i < NR_DIRTY_TYPE; i++) { 1587351df4b2SJaegeuk Kim dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL); 1588351df4b2SJaegeuk Kim if (!dirty_i->dirty_segmap[i]) 1589351df4b2SJaegeuk Kim return -ENOMEM; 1590351df4b2SJaegeuk Kim } 1591351df4b2SJaegeuk Kim 1592351df4b2SJaegeuk Kim init_dirty_segmap(sbi); 15935ec4e49fSJaegeuk Kim return init_victim_secmap(sbi); 1594351df4b2SJaegeuk Kim } 1595351df4b2SJaegeuk Kim 15960a8165d7SJaegeuk Kim /* 1597351df4b2SJaegeuk Kim * Update min, max modified time for cost-benefit GC algorithm 1598351df4b2SJaegeuk Kim */ 1599351df4b2SJaegeuk Kim static void init_min_max_mtime(struct f2fs_sb_info *sbi) 1600351df4b2SJaegeuk Kim { 1601351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 1602351df4b2SJaegeuk Kim unsigned int segno; 1603351df4b2SJaegeuk Kim 1604351df4b2SJaegeuk Kim mutex_lock(&sit_i->sentry_lock); 1605351df4b2SJaegeuk Kim 1606351df4b2SJaegeuk Kim sit_i->min_mtime = LLONG_MAX; 1607351df4b2SJaegeuk Kim 1608351df4b2SJaegeuk Kim for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) { 1609351df4b2SJaegeuk Kim unsigned int i; 1610351df4b2SJaegeuk Kim unsigned long long mtime = 0; 1611351df4b2SJaegeuk Kim 1612351df4b2SJaegeuk Kim for (i = 0; i < sbi->segs_per_sec; i++) 1613351df4b2SJaegeuk Kim mtime += get_seg_entry(sbi, segno + i)->mtime; 1614351df4b2SJaegeuk Kim 1615351df4b2SJaegeuk Kim mtime = div_u64(mtime, sbi->segs_per_sec); 1616351df4b2SJaegeuk Kim 1617351df4b2SJaegeuk Kim if (sit_i->min_mtime > mtime) 1618351df4b2SJaegeuk Kim sit_i->min_mtime = mtime; 1619351df4b2SJaegeuk Kim } 1620351df4b2SJaegeuk Kim sit_i->max_mtime = get_mtime(sbi); 1621351df4b2SJaegeuk Kim mutex_unlock(&sit_i->sentry_lock); 1622351df4b2SJaegeuk Kim } 1623351df4b2SJaegeuk Kim 1624351df4b2SJaegeuk Kim int build_segment_manager(struct f2fs_sb_info *sbi) 1625351df4b2SJaegeuk Kim { 1626351df4b2SJaegeuk Kim struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 1627351df4b2SJaegeuk Kim struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 16281042d60fSNamjae Jeon struct f2fs_sm_info *sm_info; 1629351df4b2SJaegeuk Kim int err; 1630351df4b2SJaegeuk Kim 1631351df4b2SJaegeuk Kim sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL); 1632351df4b2SJaegeuk Kim if (!sm_info) 1633351df4b2SJaegeuk Kim return -ENOMEM; 1634351df4b2SJaegeuk Kim 1635351df4b2SJaegeuk Kim /* init sm info */ 1636351df4b2SJaegeuk Kim sbi->sm_info = sm_info; 1637351df4b2SJaegeuk Kim INIT_LIST_HEAD(&sm_info->wblist_head); 1638351df4b2SJaegeuk Kim spin_lock_init(&sm_info->wblist_lock); 1639351df4b2SJaegeuk Kim sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); 1640351df4b2SJaegeuk Kim sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); 1641351df4b2SJaegeuk Kim sm_info->segment_count = le32_to_cpu(raw_super->segment_count); 1642351df4b2SJaegeuk Kim sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); 1643351df4b2SJaegeuk Kim sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); 1644351df4b2SJaegeuk Kim sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main); 1645351df4b2SJaegeuk Kim sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); 164681eb8d6eSJaegeuk Kim sm_info->rec_prefree_segments = DEF_RECLAIM_PREFREE_SEGMENTS; 1647351df4b2SJaegeuk Kim 1648351df4b2SJaegeuk Kim err 
= build_sit_info(sbi); 1649351df4b2SJaegeuk Kim if (err) 1650351df4b2SJaegeuk Kim return err; 1651351df4b2SJaegeuk Kim err = build_free_segmap(sbi); 1652351df4b2SJaegeuk Kim if (err) 1653351df4b2SJaegeuk Kim return err; 1654351df4b2SJaegeuk Kim err = build_curseg(sbi); 1655351df4b2SJaegeuk Kim if (err) 1656351df4b2SJaegeuk Kim return err; 1657351df4b2SJaegeuk Kim 1658351df4b2SJaegeuk Kim /* reinit free segmap based on SIT */ 1659351df4b2SJaegeuk Kim build_sit_entries(sbi); 1660351df4b2SJaegeuk Kim 1661351df4b2SJaegeuk Kim init_free_segmap(sbi); 1662351df4b2SJaegeuk Kim err = build_dirty_segmap(sbi); 1663351df4b2SJaegeuk Kim if (err) 1664351df4b2SJaegeuk Kim return err; 1665351df4b2SJaegeuk Kim 1666351df4b2SJaegeuk Kim init_min_max_mtime(sbi); 1667351df4b2SJaegeuk Kim return 0; 1668351df4b2SJaegeuk Kim } 1669351df4b2SJaegeuk Kim 1670351df4b2SJaegeuk Kim static void discard_dirty_segmap(struct f2fs_sb_info *sbi, 1671351df4b2SJaegeuk Kim enum dirty_type dirty_type) 1672351df4b2SJaegeuk Kim { 1673351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 1674351df4b2SJaegeuk Kim 1675351df4b2SJaegeuk Kim mutex_lock(&dirty_i->seglist_lock); 1676351df4b2SJaegeuk Kim kfree(dirty_i->dirty_segmap[dirty_type]); 1677351df4b2SJaegeuk Kim dirty_i->nr_dirty[dirty_type] = 0; 1678351df4b2SJaegeuk Kim mutex_unlock(&dirty_i->seglist_lock); 1679351df4b2SJaegeuk Kim } 1680351df4b2SJaegeuk Kim 16815ec4e49fSJaegeuk Kim static void destroy_victim_secmap(struct f2fs_sb_info *sbi) 1682351df4b2SJaegeuk Kim { 1683351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 16845ec4e49fSJaegeuk Kim kfree(dirty_i->victim_secmap); 1685351df4b2SJaegeuk Kim } 1686351df4b2SJaegeuk Kim 1687351df4b2SJaegeuk Kim static void destroy_dirty_segmap(struct f2fs_sb_info *sbi) 1688351df4b2SJaegeuk Kim { 1689351df4b2SJaegeuk Kim struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 1690351df4b2SJaegeuk Kim int i; 1691351df4b2SJaegeuk Kim 1692351df4b2SJaegeuk Kim if (!dirty_i) 1693351df4b2SJaegeuk Kim return; 1694351df4b2SJaegeuk Kim 1695351df4b2SJaegeuk Kim /* discard pre-free/dirty segments list */ 1696351df4b2SJaegeuk Kim for (i = 0; i < NR_DIRTY_TYPE; i++) 1697351df4b2SJaegeuk Kim discard_dirty_segmap(sbi, i); 1698351df4b2SJaegeuk Kim 16995ec4e49fSJaegeuk Kim destroy_victim_secmap(sbi); 1700351df4b2SJaegeuk Kim SM_I(sbi)->dirty_info = NULL; 1701351df4b2SJaegeuk Kim kfree(dirty_i); 1702351df4b2SJaegeuk Kim } 1703351df4b2SJaegeuk Kim 1704351df4b2SJaegeuk Kim static void destroy_curseg(struct f2fs_sb_info *sbi) 1705351df4b2SJaegeuk Kim { 1706351df4b2SJaegeuk Kim struct curseg_info *array = SM_I(sbi)->curseg_array; 1707351df4b2SJaegeuk Kim int i; 1708351df4b2SJaegeuk Kim 1709351df4b2SJaegeuk Kim if (!array) 1710351df4b2SJaegeuk Kim return; 1711351df4b2SJaegeuk Kim SM_I(sbi)->curseg_array = NULL; 1712351df4b2SJaegeuk Kim for (i = 0; i < NR_CURSEG_TYPE; i++) 1713351df4b2SJaegeuk Kim kfree(array[i].sum_blk); 1714351df4b2SJaegeuk Kim kfree(array); 1715351df4b2SJaegeuk Kim } 1716351df4b2SJaegeuk Kim 1717351df4b2SJaegeuk Kim static void destroy_free_segmap(struct f2fs_sb_info *sbi) 1718351df4b2SJaegeuk Kim { 1719351df4b2SJaegeuk Kim struct free_segmap_info *free_i = SM_I(sbi)->free_info; 1720351df4b2SJaegeuk Kim if (!free_i) 1721351df4b2SJaegeuk Kim return; 1722351df4b2SJaegeuk Kim SM_I(sbi)->free_info = NULL; 1723351df4b2SJaegeuk Kim kfree(free_i->free_segmap); 1724351df4b2SJaegeuk Kim kfree(free_i->free_secmap); 1725351df4b2SJaegeuk Kim kfree(free_i); 1726351df4b2SJaegeuk Kim } 1727351df4b2SJaegeuk Kim 
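/*
 * Each destroy_* helper above and below tolerates a structure that was never
 * (or only partially) allocated, so destroy_segment_manager() can also be
 * used to unwind a build_segment_manager() that failed midway.
 */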
1728351df4b2SJaegeuk Kim static void destroy_sit_info(struct f2fs_sb_info *sbi) 1729351df4b2SJaegeuk Kim { 1730351df4b2SJaegeuk Kim struct sit_info *sit_i = SIT_I(sbi); 1731351df4b2SJaegeuk Kim unsigned int start; 1732351df4b2SJaegeuk Kim 1733351df4b2SJaegeuk Kim if (!sit_i) 1734351df4b2SJaegeuk Kim return; 1735351df4b2SJaegeuk Kim 1736351df4b2SJaegeuk Kim if (sit_i->sentries) { 1737351df4b2SJaegeuk Kim for (start = 0; start < TOTAL_SEGS(sbi); start++) { 1738351df4b2SJaegeuk Kim kfree(sit_i->sentries[start].cur_valid_map); 1739351df4b2SJaegeuk Kim kfree(sit_i->sentries[start].ckpt_valid_map); 1740351df4b2SJaegeuk Kim } 1741351df4b2SJaegeuk Kim } 1742351df4b2SJaegeuk Kim vfree(sit_i->sentries); 1743351df4b2SJaegeuk Kim vfree(sit_i->sec_entries); 1744351df4b2SJaegeuk Kim kfree(sit_i->dirty_sentries_bitmap); 1745351df4b2SJaegeuk Kim 1746351df4b2SJaegeuk Kim SM_I(sbi)->sit_info = NULL; 1747351df4b2SJaegeuk Kim kfree(sit_i->sit_bitmap); 1748351df4b2SJaegeuk Kim kfree(sit_i); 1749351df4b2SJaegeuk Kim } 1750351df4b2SJaegeuk Kim 1751351df4b2SJaegeuk Kim void destroy_segment_manager(struct f2fs_sb_info *sbi) 1752351df4b2SJaegeuk Kim { 1753351df4b2SJaegeuk Kim struct f2fs_sm_info *sm_info = SM_I(sbi); 17543b03f724SChao Yu if (!sm_info) 17553b03f724SChao Yu return; 1756351df4b2SJaegeuk Kim destroy_dirty_segmap(sbi); 1757351df4b2SJaegeuk Kim destroy_curseg(sbi); 1758351df4b2SJaegeuk Kim destroy_free_segmap(sbi); 1759351df4b2SJaegeuk Kim destroy_sit_info(sbi); 1760351df4b2SJaegeuk Kim sbi->sm_info = NULL; 1761351df4b2SJaegeuk Kim kfree(sm_info); 1762351df4b2SJaegeuk Kim } 1763
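/*
 * Illustrative sketch, not part of segment.c: how a mount path is expected
 * to pair build_segment_manager() with destroy_segment_manager().  The real
 * caller lives in fs/f2fs/super.c; the function name and error label below
 * are invented for this example.  Teardown after a failed build is safe
 * because every destroy_* helper checks for a missing structure first.
 */
static int example_init_segment_manager(struct f2fs_sb_info *sbi)
{
	int err;

	/* builds SIT info, free/dirty segmaps and the current segments */
	err = build_segment_manager(sbi);
	if (err)
		goto free_sm;
	return 0;

free_sm:
	/* frees whatever was built; tolerates a partially built state */
	destroy_segment_manager(sbi);
	return err;
}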