/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include <trace/events/f2fs.h>

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi)
{
	/*
	 * We should do GC or end up with a checkpoint if there are too many
	 * dirty dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi);
	}
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = DIRTY_HOT_DATA;

		dirty_type = sentry->type;

		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
			dirty_i->nr_dirty[dirty_type]++;

		/* Only one bitmap should be set */
		for (; t <= DIRTY_COLD_NODE; t++) {
			if (t == dirty_type)
				continue;
			if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
				dirty_i->nr_dirty[t]--;
		}
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		enum dirty_type t = DIRTY_HOT_DATA;

		/* clear all the bitmaps */
		for (; t <= DIRTY_COLD_NODE; t++)
			if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
				dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
			clear_bit(GET_SECNO(sbi, segno),
						dirty_i->victim_secmap);
	}
}

/*
 * Errors such as -ENOMEM must not occur here: adding a dirty entry to the
 * seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, 0);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}
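/*
 * Note on the dirty-segment states managed above: a segment whose valid
 * block count drops to zero becomes PRE (prefree) and is only returned to
 * the free segmap once the next checkpoint has made the freed blocks
 * durable, while a partially valid segment is tracked as DIRTY per
 * temperature so that SSR and GC can pick victims of a matching type.
 */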
/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = -1;
	unsigned int total_segs = TOTAL_SEGS(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	while (1) {
		segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
				segno + 1);
		if (segno >= total_segs)
			break;
		__set_test_and_free(sbi, segno);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = -1;
	unsigned int total_segs = TOTAL_SEGS(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	while (1) {
		segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
				segno + 1);
		if (segno >= total_segs)
			break;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[PRE]))
			dirty_i->nr_dirty[PRE]--;

		/* Let's use trim */
		if (test_opt(sbi, DISCARD))
			blkdev_issue_discard(sbi->sb->s_bdev,
					START_BLOCK(sbi, segno) <<
					sbi->log_sectors_per_block,
					1 << (sbi->log_sectors_per_block +
						sbi->log_blocks_per_seg),
					GFP_NOFS, 0);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
		sit_i->dirty_sentries++;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);
	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1);

	BUG_ON((new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		if (f2fs_set_bit(offset, se->cur_valid_map))
			BUG();
	} else {
		if (!f2fs_clear_bit(offset, se->cur_valid_map))
			BUG();
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}

static void refresh_sit_entry(struct f2fs_sb_info *sbi,
			block_t old_blkaddr, block_t new_blkaddr)
{
	update_sit_entry(sbi, new_blkaddr, 1);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		update_sit_entry(sbi, old_blkaddr, -1);
}
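/*
 * Note: update_sit_entry() takes del = 1 to mark a block valid and
 * del = -1 to invalidate it. When a block migrates to a new address,
 * refresh_sit_entry() above applies +1 to the new address and then -1
 * to the old one, so the global written_valid_blocks count stays
 * balanced across the move.
 */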
void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	BUG_ON(addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	mutex_lock(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	mutex_unlock(&sit_i->sentry_lock);
}

/*
 * This function must be called with curseg_mutex held.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;
	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi)
{
	int total_size_bytes = 0;
	int valid_sum_count = 0;
	int i, sum_space;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else
			valid_sum_count += curseg_blkoff(sbi, i);
	}

	total_size_bytes = valid_sum_count * (SUMMARY_SIZE + 1)
			+ sizeof(struct nat_journal) + 2
			+ sizeof(struct sit_journal) + 2;
	sum_space = PAGE_CACHE_SIZE - SUM_FOOTER_SIZE;
	if (total_size_bytes < sum_space)
		return 1;
	else if (total_size_bytes < 2 * sum_space)
		return 2;
	return 3;
}

/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *kaddr = page_address(page);
	memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}
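/*
 * Layout note for the allocator below: segments are grouped into sections
 * (segs_per_sec) and sections into zones (secs_per_zone). get_new_segment()
 * first tries to stay within the current section, then scans the free
 * section bitmap in the requested direction, and finally retries with a
 * new hint if the chosen zone already hosts one of the current logs.
 */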
/*
 * Find a new segment from the free segments bitmap in the given allocation
 * order. This function must succeed; otherwise it is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = *newseg / sbi->segs_per_sec;
	unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	write_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
					TOTAL_SEGS(sbi), *newseg + 1);
		if (segno - *newseg < sbi->segs_per_sec -
					(*newseg % sbi->segs_per_sec))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
	if (secno >= TOTAL_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
						TOTAL_SECS(sbi), 0);
			BUG_ON(secno >= TOTAL_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
						TOTAL_SECS(sbi), 0);
		BUG_ON(left_start >= TOTAL_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	hint = secno;
	segno = secno * sbi->segs_per_sec;
	zoneno = secno / sbi->secs_per_zone;

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	BUG_ON(test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	write_unlock(&free_i->segmap_lock);
}

static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}
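/*
 * Note: __next_free_blkoff() below skips an offset that is set in either
 * bitmap. cur_valid_map reflects blocks valid right now, whereas
 * ckpt_valid_map reflects the last checkpoint; a block freed since that
 * checkpoint must not be reused by SSR before the next checkpoint,
 * presumably so that a rollback to the last checkpoint still finds its
 * old contents intact.
 */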
static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	block_t ofs;
	for (ofs = start; ofs < sbi->blocks_per_seg; ofs++) {
		if (!f2fs_test_bit(ofs, se->ckpt_valid_map)
			&& !f2fs_test_bit(ofs, se->cur_valid_map))
			break;
	}
	seg->next_blkoff = ofs;
}

/*
 * If a segment is written in LFS manner, the next block offset is simply
 * obtained by increasing the current block offset. However, if a segment is
 * written in SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff.
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}

/*
 * This function always allocates a used segment (from the dirty seglist) in
 * SSR manner, so it must recover the existing segment information of valid
 * blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	if (reuse) {
		sum_page = get_sum_page(sbi, new_segno);
		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
		f2fs_put_page(sum_page, 1);
	}
}

static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;

	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
		return v_ops->get_victim(sbi,
				&(curseg)->next_segno, BG_GC, type, SSR);

	/* For data segments, let's do SSR more intensively */
	for (; type >= CURSEG_HOT_DATA; type--)
		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
						BG_GC, type, SSR))
			return 1;
	return 0;
}

/*
 * Flush out the current segment and replace it with a new segment.
 * This function must succeed; otherwise it is a BUG.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (force)
		new_curseg(sbi, type, true);
	else if (type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
		new_curseg(sbi, type, false);
	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, false);
#ifdef CONFIG_F2FS_STAT_FS
	sbi->segment_count[curseg->alloc_type]++;
#endif
}

void allocate_new_segments(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg;
	unsigned int old_curseg;
	int i;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		curseg = CURSEG_I(sbi, i);
		old_curseg = curseg->segno;
		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
		locate_dirty_segment(sbi, old_curseg);
	}
}

static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};
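/*
 * Summary of the default allocation policy above: a forced request always
 * opens a fresh segment, the warm node log always uses LFS, an LFS log
 * whose next segment is free keeps going in LFS manner, and only when
 * free sections run low (need_SSR()) and a suitable victim exists does a
 * log switch to SSR; otherwise it falls back to a new LFS segment.
 */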
static void f2fs_end_io_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct bio_private *p = bio->bi_private;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (!uptodate) {
			SetPageError(page);
			if (page->mapping)
				set_bit(AS_EIO, &page->mapping->flags);
			set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
			p->sbi->sb->s_flags |= MS_RDONLY;
		}
		end_page_writeback(page);
		dec_page_count(p->sbi, F2FS_WRITEBACK);
	} while (bvec >= bio->bi_io_vec);

	if (p->is_sync)
		complete(p->wait);
	kfree(p);
	bio_put(bio);
}

struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);
	bio->bi_bdev = bdev;
	bio->bi_private = NULL;

	return bio;
}

static void do_submit_bio(struct f2fs_sb_info *sbi,
				enum page_type type, bool sync)
{
	int rw = sync ? WRITE_SYNC : WRITE;
	enum page_type btype = type > META ? META : type;

	if (type >= META_FLUSH)
		rw = WRITE_FLUSH_FUA;

	if (btype == META)
		rw |= REQ_META;

	if (sbi->bio[btype]) {
		struct bio_private *p = sbi->bio[btype]->bi_private;
		p->sbi = sbi;
		sbi->bio[btype]->bi_end_io = f2fs_end_io_write;

		trace_f2fs_do_submit_bio(sbi->sb, btype, sync, sbi->bio[btype]);

		if (type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			p->is_sync = true;
			p->wait = &wait;
			submit_bio(rw, sbi->bio[btype]);
			wait_for_completion(&wait);
		} else {
			p->is_sync = false;
			submit_bio(rw, sbi->bio[btype]);
		}
		sbi->bio[btype] = NULL;
	}
}

void f2fs_submit_bio(struct f2fs_sb_info *sbi, enum page_type type, bool sync)
{
	down_write(&sbi->bio_sem);
	do_submit_bio(sbi, type, sync);
	up_write(&sbi->bio_sem);
}
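/*
 * Note: submit_write_page() below batches writes per page type. A page
 * is appended to the pending bio only while block addresses remain
 * consecutive (last_block_in_bio + 1); a discontinuity or a full bio
 * forces submission and a fresh bio, so each bio describes one
 * contiguous device write.
 */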
static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
				block_t blk_addr, enum page_type type)
{
	struct block_device *bdev = sbi->sb->s_bdev;

	verify_block_addr(sbi, blk_addr);

	down_write(&sbi->bio_sem);

	inc_page_count(sbi, F2FS_WRITEBACK);

	if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1)
		do_submit_bio(sbi, type, false);
alloc_new:
	if (sbi->bio[type] == NULL) {
		struct bio_private *priv;
retry:
		priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
		if (!priv) {
			cond_resched();
			goto retry;
		}

		sbi->bio[type] = f2fs_bio_alloc(bdev, max_hw_blocks(sbi));
		sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
		sbi->bio[type]->bi_private = priv;
		/*
		 * The end_io will be assigned at the submission phase.
		 * Until then, let bio_add_page() merge consecutive IOs as much
		 * as possible.
		 */
	}

	if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		do_submit_bio(sbi, type, false);
		goto alloc_new;
	}

	sbi->last_block_in_bio[type] = blk_addr;

	up_write(&sbi->bio_sem);
	trace_f2fs_submit_write_page(page, blk_addr, type);
}

void f2fs_wait_on_page_writeback(struct page *page,
				enum page_type type, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	if (PageWriteback(page)) {
		f2fs_submit_bio(sbi, type, sync);
		wait_on_page_writeback(page);
	}
}

static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	if (curseg->next_blkoff < sbi->blocks_per_seg)
		return true;
	return false;
}

static int __get_segment_type_2(struct page *page, enum page_type p_type)
{
	if (p_type == DATA)
		return CURSEG_HOT_DATA;
	else
		return CURSEG_HOT_NODE;
}

static int __get_segment_type_4(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else
			return CURSEG_COLD_DATA;
	} else {
		if (IS_DNODE(page) && !is_cold_node(page))
			return CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type_6(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else if (is_cold_data(page) || file_is_cold(inode))
			return CURSEG_COLD_DATA;
		else
			return CURSEG_WARM_DATA;
	} else {
		if (IS_DNODE(page))
			return is_cold_node(page) ? CURSEG_WARM_NODE :
						CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type(struct page *page, enum page_type p_type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	switch (sbi->active_logs) {
	case 2:
		return __get_segment_type_2(page, p_type);
	case 4:
		return __get_segment_type_4(page, p_type);
	}
	/* NR_CURSEG_TYPE(6) logs by default */
	BUG_ON(sbi->active_logs != NR_CURSEG_TYPE);
	return __get_segment_type_6(page, p_type);
}
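/*
 * Temperature mapping applied by __get_segment_type_6() above:
 *
 *	HOT DATA	- directory blocks
 *	WARM DATA	- ordinary file blocks
 *	COLD DATA	- blocks marked cold or from cold files
 *	HOT NODE	- direct node blocks, not cold
 *	WARM NODE	- direct node blocks, cold
 *	COLD NODE	- indirect node blocks
 *
 * With 2 or 4 active logs, these classes are merged as in the _2 and _4
 * variants above.
 */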
static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, enum page_type p_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int old_cursegno;
	int type;

	type = __get_segment_type(page, p_type);
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);

	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
	old_cursegno = curseg->segno;

	/*
	 * __add_sum_entry must be called under curseg_mutex, because it
	 * updates a summary entry in the current summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	mutex_lock(&sit_i->sentry_lock);
	__refresh_next_blkoff(sbi, curseg);
#ifdef CONFIG_F2FS_STAT_FS
	sbi->block_count[curseg->alloc_type]++;
#endif

	/*
	 * SIT information should be updated before segment allocation,
	 * since SSR needs latest valid block information.
	 */
	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

	if (!__has_curseg_space(sbi, type))
		sit_i->s_ops->allocate_segment(sbi, type, false);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
	mutex_unlock(&sit_i->sentry_lock);

	if (p_type == NODE)
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

	/* writeout dirty page into bdev */
	submit_write_page(sbi, page, *new_blkaddr, p_type);

	mutex_unlock(&curseg->curseg_mutex);
}

void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
	set_page_writeback(page);
	submit_write_page(sbi, page, page->index, META);
}

void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
		unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
{
	struct f2fs_summary sum;
	set_summary(&sum, nid, 0, 0);
	do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, NODE);
}

void write_data_page(struct inode *inode, struct page *page,
		struct dnode_of_data *dn, block_t old_blkaddr,
		block_t *new_blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_summary sum;
	struct node_info ni;

	BUG_ON(old_blkaddr == NULL_ADDR);
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	do_write_page(sbi, page, old_blkaddr,
			new_blkaddr, &sum, DATA);
}

void rewrite_data_page(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blk_addr)
{
	submit_write_page(sbi, page, old_blk_addr, DATA);
}

void recover_data_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	struct seg_entry *se;
	int type;

	segno = GET_SEGNO(sbi, new_blkaddr);
	se = get_seg_entry(sbi, segno);
	type = se->type;

	if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
		if (old_blkaddr == NULL_ADDR)
			type = CURSEG_COLD_DATA;
		else
			type = CURSEG_WARM_DATA;
	}
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}

	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
					(sbi->blocks_per_seg - 1);
	__add_sum_entry(sbi, type, sum);

	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}
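/*
 * Note: rewrite_node_page() below is used by recovery to place a node
 * block back at a specific address: it points the warm node log at the
 * target segment, writes the block synchronously, and then redirects the
 * log to the segment of the next recorded node address so that subsequent
 * recovered writes continue in order.
 */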
void rewrite_node_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int type = CURSEG_WARM_NODE;
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	block_t next_blkaddr = next_blkaddr_of_node(page);
	unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	segno = GET_SEGNO(sbi, new_blkaddr);
	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}
	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
					(sbi->blocks_per_seg - 1);
	__add_sum_entry(sbi, type, sum);

	/* change the current log to the next block addr in advance */
	if (next_segno != segno) {
		curseg->next_segno = next_segno;
		change_curseg(sbi, type, true);
	}
	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) &
					(sbi->blocks_per_seg - 1);

	/* rewrite node page */
	set_page_writeback(page);
	submit_write_page(sbi, page, new_blkaddr, NODE);
	f2fs_submit_bio(sbi, NODE, true);
	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *seg_i;
	unsigned char *kaddr;
	struct page *page;
	block_t start;
	int i, j, offset;

	start = start_sum_block(sbi);

	page = get_meta_page(sbi, start++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: restore nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);

	/* Step 2: restore sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
						SUM_JOURNAL_SIZE);
	offset = 2 * SUM_JOURNAL_SIZE;

	/* Step 3: restore summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		unsigned int segno;

		seg_i = CURSEG_I(sbi, i);
		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
		seg_i->next_segno = segno;
		reset_curseg(sbi, i, 0);
		seg_i->alloc_type = ckpt->alloc_type[i];
		seg_i->next_blkoff = blk_off;

		if (seg_i->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;
			s = (struct f2fs_summary *)(kaddr + offset);
			seg_i->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
						SUM_FOOTER_SIZE)
				continue;

			f2fs_put_page(page, 1);
			page = NULL;

			page = get_meta_page(sbi, start++);
			kaddr = (unsigned char *)page_address(page);
			offset = 0;
		}
	}
	f2fs_put_page(page, 1);
	return 0;
}
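/*
 * Note: for normal (non-compacted) summaries, the location read below
 * depends on how the checkpoint was taken. After a clean umount
 * (CP_UMOUNT_FLAG), all six summary blocks live in the checkpoint pack;
 * otherwise only the data summaries are there, and node summaries are
 * read from the SSA and rebuilt via restore_node_summary().
 */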
static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum;
	struct curseg_info *curseg;
	struct page *new;
	unsigned short blk_off;
	unsigned int segno = 0;
	block_t blk_addr = 0;

	/* get segment number and block addr */
	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
							CURSEG_HOT_DATA]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
							CURSEG_HOT_NODE]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLOCK(sbi, segno);
	}

	new = get_meta_page(sbi, blk_addr);
	sum = (struct f2fs_summary_block *)page_address(new);

	if (IS_NODESEG(type)) {
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
			struct f2fs_summary *ns = &sum->entries[0];
			int i;
			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
				ns->version = 0;
				ns->ofs_in_node = 0;
			}
		} else {
			if (restore_node_summary(sbi, segno, sum)) {
				f2fs_put_page(new, 1);
				return -EINVAL;
			}
		}
	}

	/* set uncompleted segment to curseg */
	curseg = CURSEG_I(sbi, type);
	mutex_lock(&curseg->curseg_mutex);
	memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 0);
	curseg->alloc_type = ckpt->alloc_type[type];
	curseg->next_blkoff = blk_off;
	mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(new, 1);
	return 0;
}

static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;

	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
		/* restore for compacted data summary */
		if (read_compacted_summaries(sbi))
			return -EINVAL;
		type = CURSEG_HOT_NODE;
	}

	for (; type <= CURSEG_COLD_NODE; type++)
		if (read_normal_summaries(sbi, type))
			return -EINVAL;
	return 0;
}

static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
						SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	set_page_dirty(page);

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blkoff;
		seg_i = CURSEG_I(sbi, i);
		if (sbi->ckpt->alloc_type[i] == SSR)
			blkoff = sbi->blocks_per_seg;
		else
			blkoff = curseg_blkoff(sbi, i);

		for (j = 0; j < blkoff; j++) {
			if (!page) {
				page = grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;
			set_page_dirty(page);

			if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
							SUM_FOOTER_SIZE)
				continue;

			f2fs_put_page(page, 1);
			page = NULL;
		}
	}
	if (page)
		f2fs_put_page(page, 1);
}
static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;
	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

	for (i = type; i < end; i++) {
		struct curseg_info *sum = CURSEG_I(sbi, i);
		mutex_lock(&sum->curseg_mutex);
		write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
		mutex_unlock(&sum->curseg_mutex);
	}
}

void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}

void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}

int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
					unsigned int val, int alloc)
{
	int i;

	if (type == NAT_JOURNAL) {
		for (i = 0; i < nats_in_cursum(sum); i++) {
			if (le32_to_cpu(nid_in_journal(sum, i)) == val)
				return i;
		}
		if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
			return update_nats_in_cursum(sum, 1);
	} else if (type == SIT_JOURNAL) {
		for (i = 0; i < sits_in_cursum(sum); i++)
			if (le32_to_cpu(segno_in_journal(sum, i)) == val)
				return i;
		if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
			return update_sits_in_cursum(sum, 1);
	}
	return -1;
}

static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, segno);

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return get_meta_page(sbi, blk_addr);
}

static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
					unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct page *src_page, *dst_page;
	pgoff_t src_off, dst_off;
	void *src_addr, *dst_addr;

	src_off = current_sit_addr(sbi, start);
	dst_off = next_sit_addr(sbi, src_off);

	/* get current sit block page without lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	BUG_ON(PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);

	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_sit(sit_i, start);

	return dst_page;
}
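/*
 * Note: the SIT area holds two copies of every SIT block, and sit_bitmap
 * records which copy is live for each block. get_next_sit_page() above
 * copies the live block into the other copy, dirties it there, and flips
 * the bit, so an interrupted checkpoint cannot corrupt the copy the last
 * checkpoint depends on.
 */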
static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	/*
	 * If the journal area in the current summary block is full of SIT
	 * entries, flush all of them; otherwise the entries cannot be
	 * replaced with newly dirtied hot SIT entries.
	 */
	if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
		for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
			unsigned int segno;
			segno = le32_to_cpu(segno_in_journal(sum, i));
			__mark_sit_entry_dirty(sbi, segno);
		}
		update_sits_in_cursum(sum, -sits_in_cursum(sum));
		return 1;
	}
	return 0;
}

/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void flush_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	unsigned long nsegs = TOTAL_SEGS(sbi);
	struct page *page = NULL;
	struct f2fs_sit_block *raw_sit = NULL;
	unsigned int start = 0, end = 0;
	unsigned int segno = -1;
	bool flushed;

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	/*
	 * "flushed" indicates whether sit entries in journal are flushed
	 * to the SIT area or not.
	 */
	flushed = flush_sits_in_journal(sbi);

	while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) {
		struct seg_entry *se = get_seg_entry(sbi, segno);
		int sit_offset, offset;

		sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);

		if (flushed)
			goto to_sit_page;

		offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
		if (offset >= 0) {
			segno_in_journal(sum, offset) = cpu_to_le32(segno);
			seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
			goto flush_done;
		}
to_sit_page:
		if (!page || (start > segno) || (segno > end)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}

			start = START_SEGNO(sit_i, segno);
			end = start + SIT_ENTRY_PER_BLOCK - 1;

			/* read sit block that will be updated */
			page = get_next_sit_page(sbi, start);
			raw_sit = page_address(page);
		}

		/* update entry in SIT block */
		seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
flush_done:
		__clear_bit(segno, bitmap);
		sit_i->dirty_sentries--;
	}
	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);

	/* writeout last modified SIT block */
	f2fs_put_page(page, 1);

	set_prefree_as_free_segments(sbi);
}
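/*
 * Note: build_sit_info() below sizes its structures from the on-disk
 * geometry: one seg_entry per main-area segment (vzalloc, since the array
 * can be large), two SIT_VBLOCK_MAP_SIZE bitmaps per segment, and a
 * working copy of the checkpoint's SIT version bitmap.
 */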
static int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap, *dst_bitmap;
	unsigned int bitmap_size;

	/* allocate memory for SIT information */
	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
	if (!sit_i->sentries)
		return -ENOMEM;

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
	sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!sit_i->dirty_sentries_bitmap)
		return -ENOMEM;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		sit_i->sentries[start].ckpt_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map
				|| !sit_i->sentries[start].ckpt_valid_map)
			return -ENOMEM;
	}

	if (sbi->segs_per_sec > 1) {
		sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
					sizeof(struct sec_entry));
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related with SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from checkpoint pack */
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!dst_bitmap)
		return -ENOMEM;

	/* init SIT information */
	sit_i->s_ops = &default_salloc_ops;

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
	sit_i->sit_bitmap = dst_bitmap;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
	mutex_init(&sit_i->sentry_lock);
	return 0;
}

static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
	free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
	free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno =
		(unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr);
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	rwlock_init(&free_i->segmap_lock);
	return 0;
}

static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
	}
	return restore_curseg_summaries(sbi);
}
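/*
 * Note: while rebuilding the in-memory SIT below, an entry found in the
 * SIT journal (kept in the cold data summary block) takes precedence over
 * the on-disk SIT area, since the journal holds the most recently
 * checkpointed state of those segments.
 */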
static void build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	unsigned int start;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		struct seg_entry *se = &sit_i->sentries[start];
		struct f2fs_sit_block *sit_blk;
		struct f2fs_sit_entry sit;
		struct page *page;
		int i;

		mutex_lock(&curseg->curseg_mutex);
		for (i = 0; i < sits_in_cursum(sum); i++) {
			if (le32_to_cpu(segno_in_journal(sum, i)) == start) {
				sit = sit_in_journal(sum, i);
				mutex_unlock(&curseg->curseg_mutex);
				goto got_it;
			}
		}
		mutex_unlock(&curseg->curseg_mutex);
		page = get_current_sit_page(sbi, start);
		sit_blk = (struct f2fs_sit_block *)page_address(page);
		sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
		f2fs_put_page(page, 1);
got_it:
		check_block_count(sbi, start, &sit);
		seg_info_from_raw_sit(se, &sit);
		if (sbi->segs_per_sec > 1) {
			struct sec_entry *e = get_sec_entry(sbi, start);
			e->valid_blocks += se->valid_blocks;
		}
	}
}

static void init_free_segmap(struct f2fs_sb_info *sbi)
{
	unsigned int start;
	int type;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		struct seg_entry *sentry = get_seg_entry(sbi, start);
		if (!sentry->valid_blocks)
			__set_free(sbi, start);
	}

	/* set the current segments as in-use */
	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
		__set_test_and_inuse(sbi, curseg_t->segno);
	}
}

static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi);
	unsigned short valid_blocks;

	while (1) {
		/* find dirty segment based on free segmap */
		segno = find_next_inuse(free_i, total_segs, offset);
		if (segno >= total_segs)
			break;
		offset = segno + 1;
		valid_blocks = get_valid_blocks(sbi, segno, 0);
		if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks)
			continue;
		mutex_lock(&dirty_i->seglist_lock);
		__locate_dirty_segment(sbi, segno, DIRTY);
		mutex_unlock(&dirty_i->seglist_lock);
	}
}

static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));

	dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dirty_i->victim_secmap)
		return -ENOMEM;
	return 0;
}

static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i;
	unsigned int bitmap_size, i;

	/* allocate memory for dirty segments list information */
	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
	if (!dirty_i)
		return -ENOMEM;

	SM_I(sbi)->dirty_info = dirty_i;
	mutex_init(&dirty_i->seglist_lock);

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));

	for (i = 0; i < NR_DIRTY_TYPE; i++) {
		dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
		if (!dirty_i->dirty_segmap[i])
			return -ENOMEM;
	}

	init_dirty_segmap(sbi);
	return init_victim_secmap(sbi);
}
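/*
 * Note: init_min_max_mtime() below seeds the [min_mtime, max_mtime] age
 * window consulted by the cost-benefit GC victim policy; a section's age
 * is taken as the average mtime of its constituent segments.
 */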
/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	mutex_lock(&sit_i->sentry_lock);

	sit_i->min_mtime = LLONG_MAX;

	for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < sbi->segs_per_sec; i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, sbi->segs_per_sec);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi);
	mutex_unlock(&sit_i->sentry_lock);
}

int build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	INIT_LIST_HEAD(&sm_info->wblist_head);
	spin_lock_init(&sm_info->wblist_lock);
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	build_sit_entries(sbi);

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}

static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
					enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	kfree(dirty_i->victim_secmap);
}

static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}

static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		kfree(array[i].sum_blk);
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kfree(free_i->free_segmap);
	kfree(free_i->free_secmap);
	kfree(free_i);
}
static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int start;

	if (!sit_i)
		return;

	if (sit_i->sentries) {
		for (start = 0; start < TOTAL_SEGS(sbi); start++) {
			kfree(sit_i->sentries[start].cur_valid_map);
			kfree(sit_i->sentries[start].ckpt_valid_map);
		}
	}
	vfree(sit_i->sentries);
	vfree(sit_i->sec_entries);
	kfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kfree(sit_i->sit_bitmap);
	kfree(sit_i);
}

void destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}