/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff) == 0) {
		num += 32;
		word >>= 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		num += 16;
		word >>= 16;
	}
	if ((word & 0xff) == 0) {
		num += 8;
		word >>= 8;
	}
	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;
	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;
	if ((word & 0x2) == 0)
		num += 1;
	return num;
}

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * Example:
 *                             LSB <--> MSB
 *   f2fs_set_bit(0, bitmap) => 0000 0001
 *   f2fs_set_bit(7, bitmap) => 1000 0000
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;
	unsigned long mask, submask;
	unsigned long quot, rest;

	if (offset >= size)
		return size;

	size -= result;
	offset %= BITS_PER_LONG;
	if (!offset)
		goto aligned;

	tmp = *(p++);
	quot = (offset >> 3) << 3;
	rest = offset & 0x7;
	mask = ~0UL << quot;
	submask = (unsigned char)(0xff << rest) >> rest;
	submask <<= quot;
	mask &= submask;
	tmp &= mask;
	if (size < BITS_PER_LONG)
		goto found_first;
	if (tmp)
		goto found_middle;

	size -= BITS_PER_LONG;
	result += BITS_PER_LONG;
aligned:
	while (size & ~(BITS_PER_LONG - 1)) {
		tmp = *(p++);
		if (tmp)
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp &= (~0UL >> (BITS_PER_LONG - size));
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __reverse_ffs(tmp);
}
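
/*
 * Worked example for the function above (a sketch; assumes a little-endian
 * machine and that the low byte of the first word is the only non-zero
 * byte): after f2fs_set_bit(0, bitmap) and f2fs_set_bit(5, bitmap), that
 * byte holds 0x80 | 0x04 = 0x84.  __find_rev_next_bit(bitmap, 8, 1) then
 * builds submask = (0xff << 1) >> 1 = 0x7f, which masks off the reversed
 * bit 0 (0x80) and leaves tmp = 0x04, so __reverse_ffs(0x04) returns 5,
 * the next set bit at or after offset 1.
 */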
static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;
	unsigned long mask, submask;
	unsigned long quot, rest;

	if (offset >= size)
		return size;

	size -= result;
	offset %= BITS_PER_LONG;
	if (!offset)
		goto aligned;

	tmp = *(p++);
	quot = (offset >> 3) << 3;
	rest = offset & 0x7;
	mask = ~(~0UL << quot);
	submask = (unsigned char)~((unsigned char)(0xff << rest) >> rest);
	submask <<= quot;
	mask += submask;
	tmp |= mask;
	if (size < BITS_PER_LONG)
		goto found_first;
	if (~tmp)
		goto found_middle;

	size -= BITS_PER_LONG;
	result += BITS_PER_LONG;
aligned:
	while (size & ~(BITS_PER_LONG - 1)) {
		tmp = *(p++);
		if (~tmp)
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)	/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + __reverse_ffz(tmp);
}

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi)
{
	/*
	 * We should do GC or end up with checkpoint, if there are so many dirty
	 * dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi);
	}
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* check the # of cached NAT entries and prefree segments */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
				excess_prefree_segs(sbi))
		f2fs_sync_fs(sbi->sb, true);
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
			clear_bit(GET_SECNO(sbi, segno),
						dirty_i->victim_secmap);
	}
}
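
/*
 * For illustration (assuming the default 512 blocks per 2MB segment):
 * locate_dirty_segment() below classifies a segment by its valid block
 * count -- 0 valid blocks moves it to the PRE (prefree) list, 1..511 to
 * the DIRTY list, and a fully valid segment is tracked on neither.
 */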
/*
 * Errors such as -ENOMEM should not occur here.
 * Adding a dirty entry into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, 0);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

static void f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = SECTOR_FROM_BLOCK(sbi, blkstart);
	sector_t len = SECTOR_FROM_BLOCK(sbi, blklen);
	blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
	trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
}

static void add_discard_addrs(struct f2fs_sb_info *sbi,
			unsigned int segno, struct seg_entry *se)
{
	struct list_head *head = &SM_I(sbi)->discard_list;
	struct discard_entry *new;
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	int max_blocks = sbi->blocks_per_seg;
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long dmap[entries];
	unsigned int start = 0, end = -1;
	int i;

	if (!test_opt(sbi, DISCARD))
		return;

	/* zero block will be discarded through the prefree list */
	if (!se->valid_blocks || se->valid_blocks == max_blocks)
		return;

	/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
	for (i = 0; i < entries; i++)
		dmap[i] = (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

	while (SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
		if (start >= max_blocks)
			break;

		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);

		new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
		INIT_LIST_HEAD(&new->list);
		new->blkaddr = START_BLOCK(sbi, segno) + start;
		new->len = end - start;

		list_add_tail(&new->list, head);
		SM_I(sbi)->nr_discards += end - start;
	}
}
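
/*
 * A note on the dmap computation above: (cur ^ ckpt) & ckpt selects bits
 * that are 1 in ckpt_valid_map but 0 in cur_valid_map, i.e. blocks that
 * were valid at the last checkpoint and have since been invalidated.
 * For example (one byte, f2fs bit n being 0x80 >> n): cur = 0x80 and
 * ckpt = 0xe0 give dmap = 0x60, so blocks 1-2 of the segment become a
 * single two-block discard candidate.
 */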
/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = -1;
	unsigned int total_segs = TOTAL_SEGS(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	while (1) {
		segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
				segno + 1);
		if (segno >= total_segs)
			break;
		__set_test_and_free(sbi, segno);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->discard_list);
	struct list_head *this, *next;
	struct discard_entry *entry;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int total_segs = TOTAL_SEGS(sbi);
	unsigned int start = 0, end = -1;

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;
		start = find_next_bit(prefree_map, total_segs, end + 1);
		if (start >= total_segs)
			break;
		end = find_next_zero_bit(prefree_map, total_segs, start + 1);

		for (i = start; i < end; i++)
			clear_bit(i, prefree_map);

		dirty_i->nr_dirty[PRE] -= end - start;

		if (!test_opt(sbi, DISCARD))
			continue;

		f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
				(end - start) << sbi->log_blocks_per_seg);
	}
	mutex_unlock(&dirty_i->seglist_lock);

	/* send small discards */
	list_for_each_safe(this, next, head) {
		entry = list_entry(this, struct discard_entry, list);
		f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
		list_del(&entry->list);
		SM_I(sbi)->nr_discards -= entry->len;
		kmem_cache_free(discard_entry_slab, entry);
	}
}

static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
		sit_i->dirty_sentries++;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);
	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1);

	f2fs_bug_on((new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		if (f2fs_set_bit(offset, se->cur_valid_map))
			BUG();
	} else {
		if (!f2fs_clear_bit(offset, se->cur_valid_map))
			BUG();
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}
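
/*
 * Usage sketch: refresh_sit_entry() below pairs two update_sit_entry()
 * calls -- del = +1 for the newly written block and del = -1 for the old
 * copy being invalidated -- so an overwrite keeps the total valid block
 * count balanced across the two segments, while a fresh write (where
 * GET_SEGNO() of the old address is NULL_SEGNO) grows it by one.
 */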
static void refresh_sit_entry(struct f2fs_sb_info *sbi,
			block_t old_blkaddr, block_t new_blkaddr)
{
	update_sit_entry(sbi, new_blkaddr, 1);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		update_sit_entry(sbi, old_blkaddr, -1);
}

void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	f2fs_bug_on(addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	mutex_lock(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	mutex_unlock(&sit_i->sentry_lock);
}

/*
 * This function must be called under the curseg_mutex lock.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;
	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else
			valid_sum_count += curseg_blkoff(sbi, i);
	}

	sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
	if (valid_sum_count <= sum_in_page)
		return 1;
	else if ((valid_sum_count - sum_in_page) <=
		(PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
		return 2;
	return 3;
}

/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *kaddr = page_address(page);
	memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}
/*
 * Find a new segment from the free segments bitmap in the given order.
 * This function must succeed; otherwise it is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = *newseg / sbi->segs_per_sec;
	unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	write_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
					TOTAL_SEGS(sbi), *newseg + 1);
		if (segno - *newseg < sbi->segs_per_sec -
					(*newseg % sbi->segs_per_sec))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
	if (secno >= TOTAL_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
						TOTAL_SECS(sbi), 0);
			f2fs_bug_on(secno >= TOTAL_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
						TOTAL_SECS(sbi), 0);
		f2fs_bug_on(left_start >= TOTAL_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	hint = secno;
	segno = secno * sbi->segs_per_sec;
	zoneno = secno / sbi->secs_per_zone;

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	f2fs_bug_on(test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	write_unlock(&free_i->segmap_lock);
}
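
/*
 * Zone-avoidance sketch for the function above: if the chosen zone
 * already hosts one of the active logs, the search retries from a new
 * hint.  For example, with secs_per_zone = 2 and a conflict in zone 3,
 * a rightward search resumes at hint = (3 + 1) * 2 = section 8, while a
 * leftward search resumes just left of the zone at hint = 3 * 2 - 1 =
 * section 5.  Once every zone has been tried (init == false), the
 * conflict is tolerated and the section is taken anyway.
 */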
static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}

static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	unsigned long target_map[entries];
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	int i, pos;

	for (i = 0; i < entries; i++)
		target_map[i] = ckpt_map[i] | cur_map[i];

	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);

	seg->next_blkoff = pos;
}

/*
 * If a segment is written in LFS manner, the next block offset is just
 * obtained by increasing the current block offset. However, if a segment is
 * written in SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff.
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}

/*
 * This function always allocates a used segment (from the dirty seglist) in
 * SSR manner, so it should recover the existing segment information of
 * valid blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	if (reuse) {
		sum_page = get_sum_page(sbi, new_segno);
		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
		f2fs_put_page(sum_page, 1);
	}
}

static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;

	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
		return v_ops->get_victim(sbi,
				&(curseg)->next_segno, BG_GC, type, SSR);

	/* For data segments, let's do SSR more intensively */
	for (; type >= CURSEG_HOT_DATA; type--)
		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
						BG_GC, type, SSR))
			return 1;
	return 0;
}
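
/*
 * A small reading aid for get_ssr_segment() above: node logs only ever
 * try a victim of their own type, whereas data logs, once free sections
 * run low, walk from the requested temperature down toward
 * CURSEG_HOT_DATA (type--), so e.g. a cold-data request may fall back to
 * a warm- or hot-data victim.
 */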
/*
 * Flush out the current segment and replace it with a new segment.
 * This function must succeed; otherwise it is a BUG.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (force)
		new_curseg(sbi, type, true);
	else if (type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
		new_curseg(sbi, type, false);
	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, false);

	stat_inc_seg_type(sbi, curseg);
}

void allocate_new_segments(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg;
	unsigned int old_curseg;
	int i;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		curseg = CURSEG_I(sbi, i);
		old_curseg = curseg->segno;
		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
		locate_dirty_segment(sbi, old_curseg);
	}
}

static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};

static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	if (curseg->next_blkoff < sbi->blocks_per_seg)
		return true;
	return false;
}

static int __get_segment_type_2(struct page *page, enum page_type p_type)
{
	if (p_type == DATA)
		return CURSEG_HOT_DATA;
	else
		return CURSEG_HOT_NODE;
}

static int __get_segment_type_4(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else
			return CURSEG_COLD_DATA;
	} else {
		if (IS_DNODE(page) && !is_cold_node(page))
			return CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type_6(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else if (is_cold_data(page) || file_is_cold(inode))
			return CURSEG_COLD_DATA;
		else
			return CURSEG_WARM_DATA;
	} else {
		if (IS_DNODE(page))
			return is_cold_node(page) ? CURSEG_WARM_NODE :
						CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type(struct page *page, enum page_type p_type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	switch (sbi->active_logs) {
	case 2:
		return __get_segment_type_2(page, p_type);
	case 4:
		return __get_segment_type_4(page, p_type);
	}
	/* NR_CURSEG_TYPE(6) logs by default */
	f2fs_bug_on(sbi->active_logs != NR_CURSEG_TYPE);
	return __get_segment_type_6(page, p_type);
}
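
/*
 * Summary of the temperature mapping implemented above (six-log case):
 * directory data goes to HOT_DATA, regular file data to WARM_DATA, and
 * cold/multimedia file data to COLD_DATA; direct node blocks go to
 * HOT_NODE (or WARM_NODE when marked cold), and indirect node blocks to
 * COLD_NODE.  With 2 or 4 active logs these classes are folded together.
 */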
void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
		block_t old_blkaddr, block_t *new_blkaddr,
		struct f2fs_summary *sum, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int old_cursegno;

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);

	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
	old_cursegno = curseg->segno;

	/*
	 * __add_sum_entry must be called under the curseg_mutex because this
	 * function updates a summary entry in the current summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	mutex_lock(&sit_i->sentry_lock);
	__refresh_next_blkoff(sbi, curseg);

	stat_inc_block_count(sbi, curseg);

	/*
	 * SIT information should be updated before segment allocation,
	 * since SSR needs latest valid block information.
	 */
	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

	if (!__has_curseg_space(sbi, type))
		sit_i->s_ops->allocate_segment(sbi, type, false);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
	mutex_unlock(&sit_i->sentry_lock);

	if (page && IS_NODESEG(type))
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

	mutex_unlock(&curseg->curseg_mutex);
}

static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
	int type = __get_segment_type(page, fio->type);

	allocate_data_block(sbi, page, old_blkaddr, new_blkaddr, sum, type);

	/* writeout dirty page into bdev */
	f2fs_submit_page_mbio(sbi, page, *new_blkaddr, fio);
}

void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_io_info fio = {
		.type = META,
		.rw = WRITE_SYNC | REQ_META | REQ_PRIO
	};

	set_page_writeback(page);
	f2fs_submit_page_mbio(sbi, page, page->index, &fio);
}

void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_io_info *fio,
		unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
{
	struct f2fs_summary sum;
	set_summary(&sum, nid, 0, 0);
	do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, fio);
}

void write_data_page(struct page *page, struct dnode_of_data *dn,
		block_t *new_blkaddr, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct f2fs_summary sum;
	struct node_info ni;

	f2fs_bug_on(dn->data_blkaddr == NULL_ADDR);
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	do_write_page(sbi, page, dn->data_blkaddr, new_blkaddr, &sum, fio);
}

void rewrite_data_page(struct page *page, block_t old_blkaddr,
					struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	f2fs_submit_page_mbio(sbi, page, old_blkaddr, fio);
}
void recover_data_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	struct seg_entry *se;
	int type;

	segno = GET_SEGNO(sbi, new_blkaddr);
	se = get_seg_entry(sbi, segno);
	type = se->type;

	if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
		if (old_blkaddr == NULL_ADDR)
			type = CURSEG_COLD_DATA;
		else
			type = CURSEG_WARM_DATA;
	}
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}

	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
					(sbi->blocks_per_seg - 1);
	__add_sum_entry(sbi, type, sum);

	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

void rewrite_node_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int type = CURSEG_WARM_NODE;
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	block_t next_blkaddr = next_blkaddr_of_node(page);
	unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);
	struct f2fs_io_info fio = {
		.type = NODE,
		.rw = WRITE_SYNC,
	};

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	segno = GET_SEGNO(sbi, new_blkaddr);
	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}
	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
					(sbi->blocks_per_seg - 1);
	__add_sum_entry(sbi, type, sum);

	/* change the current log to the next block addr in advance */
	if (next_segno != segno) {
		curseg->next_segno = next_segno;
		change_curseg(sbi, type, true);
	}
	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) &
					(sbi->blocks_per_seg - 1);

	/* rewrite node page */
	set_page_writeback(page);
	f2fs_submit_page_mbio(sbi, page, new_blkaddr, &fio);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

void f2fs_wait_on_page_writeback(struct page *page,
				enum page_type type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	if (PageWriteback(page)) {
		f2fs_submit_merged_bio(sbi, type, WRITE);
		wait_on_page_writeback(page);
	}
}
static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *seg_i;
	unsigned char *kaddr;
	struct page *page;
	block_t start;
	int i, j, offset;

	start = start_sum_block(sbi);

	page = get_meta_page(sbi, start++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: restore nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);

	/* Step 2: restore sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
						SUM_JOURNAL_SIZE);
	offset = 2 * SUM_JOURNAL_SIZE;

	/* Step 3: restore summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		unsigned int segno;

		seg_i = CURSEG_I(sbi, i);
		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
		seg_i->next_segno = segno;
		reset_curseg(sbi, i, 0);
		seg_i->alloc_type = ckpt->alloc_type[i];
		seg_i->next_blkoff = blk_off;

		if (seg_i->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;
			s = (struct f2fs_summary *)(kaddr + offset);
			seg_i->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
						SUM_FOOTER_SIZE)
				continue;

			f2fs_put_page(page, 1);
			page = NULL;

			page = get_meta_page(sbi, start++);
			kaddr = (unsigned char *)page_address(page);
			offset = 0;
		}
	}
	f2fs_put_page(page, 1);
	return 0;
}

static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum;
	struct curseg_info *curseg;
	struct page *new;
	unsigned short blk_off;
	unsigned int segno = 0;
	block_t blk_addr = 0;

	/* get segment number and block addr */
	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
							CURSEG_HOT_DATA]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
							CURSEG_HOT_NODE]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLOCK(sbi, segno);
	}

	new = get_meta_page(sbi, blk_addr);
	sum = (struct f2fs_summary_block *)page_address(new);

	if (IS_NODESEG(type)) {
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
			struct f2fs_summary *ns = &sum->entries[0];
			int i;
			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
				ns->version = 0;
				ns->ofs_in_node = 0;
			}
		} else {
			if (restore_node_summary(sbi, segno, sum)) {
				f2fs_put_page(new, 1);
				return -EINVAL;
			}
		}
	}

	/* set uncompleted segment to curseg */
	curseg = CURSEG_I(sbi, type);
	mutex_lock(&curseg->curseg_mutex);
	memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 0);
	curseg->alloc_type = ckpt->alloc_type[type];
	curseg->next_blkoff = blk_off;
	mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(new, 1);
	return 0;
}

static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;

	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
		/* restore for compacted data summary */
		if (read_compacted_summaries(sbi))
			return -EINVAL;
		type = CURSEG_HOT_NODE;
	}

	for (; type <= CURSEG_COLD_NODE; type++)
		if (read_normal_summaries(sbi, type))
			return -EINVAL;
	return 0;
}
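
/*
 * Layout sketch of the compacted summary area, mirrored by the reader
 * above and the writer below: the first page carries the NAT journal,
 * then the SIT journal, then as many f2fs_summary entries as fit before
 * SUM_FOOTER_SIZE; the remaining data-log entries spill into up to two
 * more pages, matching the 1..3 result of npages_for_summary_flush().
 */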
static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
						SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blkoff;
		seg_i = CURSEG_I(sbi, i);
		if (sbi->ckpt->alloc_type[i] == SSR)
			blkoff = sbi->blocks_per_seg;
		else
			blkoff = curseg_blkoff(sbi, i);

		for (j = 0; j < blkoff; j++) {
			if (!page) {
				page = grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;

			if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
							SUM_FOOTER_SIZE)
				continue;

			set_page_dirty(page);
			f2fs_put_page(page, 1);
			page = NULL;
		}
	}
	if (page) {
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;
	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

	for (i = type; i < end; i++) {
		struct curseg_info *sum = CURSEG_I(sbi, i);
		mutex_lock(&sum->curseg_mutex);
		write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
		mutex_unlock(&sum->curseg_mutex);
	}
}

void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}

void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}

int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
					unsigned int val, int alloc)
{
	int i;

	if (type == NAT_JOURNAL) {
		for (i = 0; i < nats_in_cursum(sum); i++) {
			if (le32_to_cpu(nid_in_journal(sum, i)) == val)
				return i;
		}
		if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
			return update_nats_in_cursum(sum, 1);
	} else if (type == SIT_JOURNAL) {
		for (i = 0; i < sits_in_cursum(sum); i++)
			if (le32_to_cpu(segno_in_journal(sum, i)) == val)
				return i;
		if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
			return update_sits_in_cursum(sum, 1);
	}
	return -1;
}

static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, segno);

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return get_meta_page(sbi, blk_addr);
}
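
/*
 * Note on the two SIT copies: each SIT block exists twice on disk,
 * sit_blocks apart, and sit_i->sit_bitmap records which copy is live.
 * get_current_sit_page() above reads the live copy; get_next_sit_page()
 * below copies it into the stand-by location and flips the bitmap via
 * set_to_next_sit(), so a checkpoint never overwrites the live copy in
 * place.
 */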
static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
					unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct page *src_page, *dst_page;
	pgoff_t src_off, dst_off;
	void *src_addr, *dst_addr;

	src_off = current_sit_addr(sbi, start);
	dst_off = next_sit_addr(sbi, src_off);

	/* get current sit block page without lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);

	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_sit(sit_i, start);

	return dst_page;
}

static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	/*
	 * If the journal area in the current summary is full of sit entries,
	 * all the sit entries will be flushed. Otherwise the sit entries
	 * cannot be replaced by newly hot sit entries.
	 */
	if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
		for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
			unsigned int segno;
			segno = le32_to_cpu(segno_in_journal(sum, i));
			__mark_sit_entry_dirty(sbi, segno);
		}
		update_sits_in_cursum(sum, -sits_in_cursum(sum));
		return true;
	}
	return false;
}
/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void flush_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	unsigned long nsegs = TOTAL_SEGS(sbi);
	struct page *page = NULL;
	struct f2fs_sit_block *raw_sit = NULL;
	unsigned int start = 0, end = 0;
	unsigned int segno = -1;
	bool flushed;

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	/*
	 * "flushed" indicates whether sit entries in journal are flushed
	 * to the SIT area or not.
	 */
	flushed = flush_sits_in_journal(sbi);

	while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) {
		struct seg_entry *se = get_seg_entry(sbi, segno);
		int sit_offset, offset;

		sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);

		/* add discard candidates */
		if (SM_I(sbi)->nr_discards < SM_I(sbi)->max_discards)
			add_discard_addrs(sbi, segno, se);

		if (flushed)
			goto to_sit_page;

		offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
		if (offset >= 0) {
			segno_in_journal(sum, offset) = cpu_to_le32(segno);
			seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
			goto flush_done;
		}
to_sit_page:
		if (!page || (start > segno) || (segno > end)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}

			start = START_SEGNO(sit_i, segno);
			end = start + SIT_ENTRY_PER_BLOCK - 1;

			/* read sit block that will be updated */
			page = get_next_sit_page(sbi, start);
			raw_sit = page_address(page);
		}

		/* update entry in SIT block */
		seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
flush_done:
		__clear_bit(segno, bitmap);
		sit_i->dirty_sentries--;
	}
	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);

	/* writeout last modified SIT block */
	f2fs_put_page(page, 1);

	set_prefree_as_free_segments(sbi);
}
static int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap, *dst_bitmap;
	unsigned int bitmap_size;

	/* allocate memory for SIT information */
	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
	if (!sit_i->sentries)
		return -ENOMEM;

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
	sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!sit_i->dirty_sentries_bitmap)
		return -ENOMEM;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		sit_i->sentries[start].ckpt_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map
				|| !sit_i->sentries[start].ckpt_valid_map)
			return -ENOMEM;
	}

	if (sbi->segs_per_sec > 1) {
		sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
					sizeof(struct sec_entry));
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related with SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from checkpoint pack */
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!dst_bitmap)
		return -ENOMEM;

	/* init SIT information */
	sit_i->s_ops = &default_salloc_ops;

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
	sit_i->sit_bitmap = dst_bitmap;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
	mutex_init(&sit_i->sentry_lock);
	return 0;
}

static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
	free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
	free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno =
		(unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr);
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	rwlock_init(&free_i->segmap_lock);
	return 0;
}

static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
	}
	return restore_curseg_summaries(sbi);
}

static int ra_sit_pages(struct f2fs_sb_info *sbi, int start, int nrpages)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;
	block_t blk_addr, prev_blk_addr = 0;
	int sit_blk_cnt = SIT_BLK_CNT(sbi);
	int blkno = start;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO
	};

	for (; blkno < start + nrpages && blkno < sit_blk_cnt; blkno++) {

		blk_addr = current_sit_addr(sbi, blkno * SIT_ENTRY_PER_BLOCK);

		if (blkno != start && prev_blk_addr + 1 != blk_addr)
			break;
		prev_blk_addr = blk_addr;
repeat:
		page = grab_cache_page(mapping, blk_addr);
		if (!page) {
			cond_resched();
			goto repeat;
		}
		if (PageUptodate(page)) {
			mark_page_accessed(page);
			f2fs_put_page(page, 1);
			continue;
		}

		f2fs_submit_page_mbio(sbi, page, blk_addr, &fio);

		mark_page_accessed(page);
		f2fs_put_page(page, 0);
	}

	f2fs_submit_merged_bio(sbi, META, READ);
	return blkno - start;
}
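
/*
 * ra_sit_pages() above stops early when the on-disk addresses stop being
 * contiguous (prev_blk_addr + 1 != blk_addr), which happens where
 * consecutive SIT blocks live in opposite copies; build_sit_entries()
 * below simply calls it again from the next block, so readahead proceeds
 * in maximal physically contiguous runs.
 */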
static void build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int sit_blk_cnt = SIT_BLK_CNT(sbi);
	unsigned int i, start, end;
	unsigned int readed, start_blk = 0;
	int nrpages = MAX_BIO_BLOCKS(max_hw_blocks(sbi));

	do {
		readed = ra_sit_pages(sbi, start_blk, nrpages);

		start = start_blk * sit_i->sents_per_block;
		end = (start_blk + readed) * sit_i->sents_per_block;

		for (; start < end && start < TOTAL_SEGS(sbi); start++) {
			struct seg_entry *se = &sit_i->sentries[start];
			struct f2fs_sit_block *sit_blk;
			struct f2fs_sit_entry sit;
			struct page *page;

			mutex_lock(&curseg->curseg_mutex);
			for (i = 0; i < sits_in_cursum(sum); i++) {
				if (le32_to_cpu(segno_in_journal(sum, i))
								== start) {
					sit = sit_in_journal(sum, i);
					mutex_unlock(&curseg->curseg_mutex);
					goto got_it;
				}
			}
			mutex_unlock(&curseg->curseg_mutex);

			page = get_current_sit_page(sbi, start);
			sit_blk = (struct f2fs_sit_block *)page_address(page);
			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
			f2fs_put_page(page, 1);
got_it:
			check_block_count(sbi, start, &sit);
			seg_info_from_raw_sit(se, &sit);
			if (sbi->segs_per_sec > 1) {
				struct sec_entry *e = get_sec_entry(sbi, start);
				e->valid_blocks += se->valid_blocks;
			}
		}
		start_blk += readed;
	} while (start_blk < sit_blk_cnt);
}

static void init_free_segmap(struct f2fs_sb_info *sbi)
{
	unsigned int start;
	int type;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		struct seg_entry *sentry = get_seg_entry(sbi, start);
		if (!sentry->valid_blocks)
			__set_free(sbi, start);
	}

	/* set the current segments as in-use */
	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
		__set_test_and_inuse(sbi, curseg_t->segno);
	}
}

static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi);
	unsigned short valid_blocks;

	while (1) {
		/* find dirty segment based on free segmap */
		segno = find_next_inuse(free_i, total_segs, offset);
		if (segno >= total_segs)
			break;
		offset = segno + 1;
		valid_blocks = get_valid_blocks(sbi, segno, 0);
		if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks)
			continue;
		mutex_lock(&dirty_i->seglist_lock);
		__locate_dirty_segment(sbi, segno, DIRTY);
		mutex_unlock(&dirty_i->seglist_lock);
	}
}

static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));

	dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dirty_i->victim_secmap)
		return -ENOMEM;
	return 0;
}
static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i;
	unsigned int bitmap_size, i;

	/* allocate memory for dirty segments list information */
	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
	if (!dirty_i)
		return -ENOMEM;

	SM_I(sbi)->dirty_info = dirty_i;
	mutex_init(&dirty_i->seglist_lock);

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));

	for (i = 0; i < NR_DIRTY_TYPE; i++) {
		dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
		if (!dirty_i->dirty_segmap[i])
			return -ENOMEM;
	}

	init_dirty_segmap(sbi);
	return init_victim_secmap(sbi);
}

/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	mutex_lock(&sit_i->sentry_lock);

	sit_i->min_mtime = LLONG_MAX;

	for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < sbi->segs_per_sec; i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, sbi->segs_per_sec);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi);
	mutex_unlock(&sit_i->sentry_lock);
}

int build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	INIT_LIST_HEAD(&sm_info->wblist_head);
	spin_lock_init(&sm_info->wblist_lock);
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = DEF_RECLAIM_PREFREE_SEGMENTS;
	sm_info->ipu_policy = F2FS_IPU_DISABLE;
	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;

	INIT_LIST_HEAD(&sm_info->discard_list);
	sm_info->nr_discards = 0;
	sm_info->max_discards = 0;

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	build_sit_entries(sbi);

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}

static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	kfree(dirty_i->victim_secmap);
}

static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}
static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		kfree(array[i].sum_blk);
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kfree(free_i->free_segmap);
	kfree(free_i->free_secmap);
	kfree(free_i);
}

static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int start;

	if (!sit_i)
		return;

	if (sit_i->sentries) {
		for (start = 0; start < TOTAL_SEGS(sbi); start++) {
			kfree(sit_i->sentries[start].cur_valid_map);
			kfree(sit_i->sentries[start].ckpt_valid_map);
		}
	}
	vfree(sit_i->sentries);
	vfree(sit_i->sec_entries);
	kfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kfree(sit_i->sit_bitmap);
	kfree(sit_i);
}

void destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	if (!sm_info)
		return;
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}

int __init create_segment_manager_caches(void)
{
	discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
			sizeof(struct discard_entry), NULL);
	if (!discard_entry_slab)
		return -ENOMEM;
	return 0;
}

void destroy_segment_manager_caches(void)
{
	kmem_cache_destroy(discard_entry_slab);
}