/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include <trace/events/f2fs.h>

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi)
{
	/*
	 * If there are too many dirty dir/node pages without enough free
	 * segments, we should do GC, which may end up with a checkpoint.
	 */
	if (has_not_enough_free_secs(sbi, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi);
	}
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = DIRTY_HOT_DATA;

		dirty_type = sentry->type;

		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
			dirty_i->nr_dirty[dirty_type]++;

		/* Only one bitmap should be set */
		for (; t <= DIRTY_COLD_NODE; t++) {
			if (t == dirty_type)
				continue;
			if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
				dirty_i->nr_dirty[t]--;
		}
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		enum dirty_type t = DIRTY_HOT_DATA;

		/* clear its dirty bitmap */
		for (; t <= DIRTY_COLD_NODE; t++) {
			if (test_and_clear_bit(segno,
						dirty_i->dirty_segmap[t])) {
				dirty_i->nr_dirty[t]--;
				break;
			}
		}

		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
			clear_bit(GET_SECNO(sbi, segno),
						dirty_i->victim_secmap);
	}
}

/*
 * No error such as -ENOMEM should occur here, since adding a dirty entry
 * into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be
 * added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, 0);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}
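/*
 * Classification example (a sketch, assuming the common 512 blocks per
 * segment): a segment whose valid block count drops to 0 moves to the
 * PRE (prefree) list and leaves the DIRTY lists; one with 1..511 valid
 * blocks stays on DIRTY; a fully valid segment is removed from DIRTY,
 * which the SSR recovery case above relies on.
 */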
/*
 * clear_prefree_segments() should be called after the checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = -1;
	unsigned int total_segs = TOTAL_SEGS(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	while (1) {
		segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
				segno + 1);
		if (segno >= total_segs)
			break;
		__set_test_and_free(sbi, segno);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = -1;
	unsigned int total_segs = TOTAL_SEGS(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	while (1) {
		segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
				segno + 1);
		if (segno >= total_segs)
			break;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[PRE]))
			dirty_i->nr_dirty[PRE]--;

		/* Let's use trim */
		if (test_opt(sbi, DISCARD))
			blkdev_issue_discard(sbi->sb->s_bdev,
					START_BLOCK(sbi, segno) <<
					sbi->log_sectors_per_block,
					1 << (sbi->log_sectors_per_block +
						sbi->log_blocks_per_seg),
					GFP_NOFS, 0);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
		sit_i->dirty_sentries++;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);
	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1);

	BUG_ON((new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		if (f2fs_set_bit(offset, se->cur_valid_map))
			BUG();
	} else {
		if (!f2fs_clear_bit(offset, se->cur_valid_map))
			BUG();
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update the total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}

static void refresh_sit_entry(struct f2fs_sb_info *sbi,
			block_t old_blkaddr, block_t new_blkaddr)
{
	update_sit_entry(sbi, new_blkaddr, 1);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		update_sit_entry(sbi, old_blkaddr, -1);
}
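/*
 * Bookkeeping sketch for the two helpers above, with hypothetical
 * addresses: writing a block calls update_sit_entry(sbi, new, +1),
 * setting its bit in cur_valid_map; an overwrite additionally calls
 * update_sit_entry(sbi, old, -1) via refresh_sit_entry(). Note that
 * ckpt_valid_blocks only changes for blocks that were not valid at the
 * last checkpoint, so a block allocated and freed between checkpoints
 * cancels out.
 */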
void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	BUG_ON(addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	mutex_lock(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	mutex_unlock(&sit_i->sentry_lock);
}

/*
 * This function must be called with curseg_mutex held.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;
	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi)
{
	int total_size_bytes = 0;
	int valid_sum_count = 0;
	int i, sum_space;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else
			valid_sum_count += curseg_blkoff(sbi, i);
	}

	total_size_bytes = valid_sum_count * (SUMMARY_SIZE + 1)
			+ sizeof(struct nat_journal) + 2
			+ sizeof(struct sit_journal) + 2;
	sum_space = PAGE_CACHE_SIZE - SUM_FOOTER_SIZE;
	if (total_size_bytes < sum_space)
		return 1;
	else if (total_size_bytes < 2 * sum_space)
		return 2;
	return 3;
}
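/*
 * Sizing sketch (assuming 4KB pages): the compacted format packs the NAT
 * and SIT journals, plus a 2-byte entry counter each, followed by the
 * data-summary entries back to back; since SUM_FOOTER_SIZE bytes are
 * reserved per page, the result above fits in at most three summary
 * pages.
 */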
/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *kaddr = page_address(page);
	memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}

/*
 * Find a new segment from the free segments bitmap in the right order.
 * This function must succeed; otherwise it BUGs.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = *newseg / sbi->segs_per_sec;
	unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	write_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
					TOTAL_SEGS(sbi), *newseg + 1);
		if (segno - *newseg < sbi->segs_per_sec -
					(*newseg % sbi->segs_per_sec))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
	if (secno >= TOTAL_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
						TOTAL_SECS(sbi), 0);
			BUG_ON(secno >= TOTAL_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
						TOTAL_SECS(sbi), 0);
		BUG_ON(left_start >= TOTAL_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	hint = secno;
	segno = secno * sbi->segs_per_sec;
	zoneno = secno / sbi->secs_per_zone;

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	BUG_ON(test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	write_unlock(&free_i->segmap_lock);
}

static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}

static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	block_t ofs;
	for (ofs = start; ofs < sbi->blocks_per_seg; ofs++) {
		if (!f2fs_test_bit(ofs, se->ckpt_valid_map)
			&& !f2fs_test_bit(ofs, se->cur_valid_map))
			break;
	}
	seg->next_blkoff = ofs;
}
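/*
 * Example for __next_free_blkoff() above (a sketch with made-up bitmaps):
 * if the first four bits of ckpt_valid_map are 1,1,0,1 and those of
 * cur_valid_map are 1,0,0,0, a scan from offset 0 stops at offset 2,
 * the first block free in *both* bitmaps, so an SSR log never overwrites
 * data that was valid at the last checkpoint.
 */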
/*
 * If a segment is written in LFS manner, the next block offset is simply
 * obtained by increasing the current block offset. However, if a segment
 * is written in SSR manner, the next block offset must be obtained by
 * calling __next_free_blkoff().
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}

/*
 * This function always allocates a used segment (from the dirty seglist)
 * in SSR manner, so it must recover the existing segment information of
 * valid blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	if (reuse) {
		sum_page = get_sum_page(sbi, new_segno);
		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
		f2fs_put_page(sum_page, 1);
	}
}

static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;

	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
		return v_ops->get_victim(sbi,
				&(curseg)->next_segno, BG_GC, type, SSR);

	/* For data segments, let's do SSR more intensively */
	for (; type >= CURSEG_HOT_DATA; type--)
		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
						BG_GC, type, SSR))
			return 1;
	return 0;
}

/*
 * Flush out the current segment and replace it with a new segment.
 * This function must succeed; otherwise it BUGs.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (force)
		new_curseg(sbi, type, true);
	else if (type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
		new_curseg(sbi, type, false);
	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, false);

	stat_inc_seg_type(sbi, curseg);
}

void allocate_new_segments(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg;
	unsigned int old_curseg;
	int i;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		curseg = CURSEG_I(sbi, i);
		old_curseg = curseg->segno;
		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
		locate_dirty_segment(sbi, old_curseg);
	}
}

static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};
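/*
 * Policy of allocate_segment_by_default() above, in short (a sketch):
 * force means a brand-new section; CURSEG_WARM_NODE always takes a new
 * segment; other logs prefer a new segment while the adjacent one is
 * still free, and fall back to SSR via change_curseg() only when
 * need_SSR() holds and a victim can be found.
 */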
static void f2fs_end_io_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct bio_private *p = bio->bi_private;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (!uptodate) {
			SetPageError(page);
			if (page->mapping)
				set_bit(AS_EIO, &page->mapping->flags);
			set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
			p->sbi->sb->s_flags |= MS_RDONLY;
		}
		end_page_writeback(page);
		dec_page_count(p->sbi, F2FS_WRITEBACK);
	} while (bvec >= bio->bi_io_vec);

	if (p->is_sync)
		complete(p->wait);

	if (!get_pages(p->sbi, F2FS_WRITEBACK) && p->sbi->cp_task)
		wake_up_process(p->sbi->cp_task);

	kfree(p);
	bio_put(bio);
}

struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);
	bio->bi_bdev = bdev;
	bio->bi_private = NULL;

	return bio;
}

static void do_submit_bio(struct f2fs_sb_info *sbi,
				enum page_type type, bool sync)
{
	int rw = sync ? WRITE_SYNC : WRITE;
	enum page_type btype = type > META ? META : type;

	if (type >= META_FLUSH)
		rw = WRITE_FLUSH_FUA;

	if (btype == META)
		rw |= REQ_META;

	if (sbi->bio[btype]) {
		struct bio_private *p = sbi->bio[btype]->bi_private;
		p->sbi = sbi;
		sbi->bio[btype]->bi_end_io = f2fs_end_io_write;

		trace_f2fs_do_submit_bio(sbi->sb, btype, sync, sbi->bio[btype]);

		if (type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			p->is_sync = true;
			p->wait = &wait;
			submit_bio(rw, sbi->bio[btype]);
			wait_for_completion(&wait);
		} else {
			p->is_sync = false;
			submit_bio(rw, sbi->bio[btype]);
		}
		sbi->bio[btype] = NULL;
	}
}

void f2fs_submit_bio(struct f2fs_sb_info *sbi, enum page_type type, bool sync)
{
	down_write(&sbi->bio_sem);
	do_submit_bio(sbi, type, sync);
	up_write(&sbi->bio_sem);
}
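/*
 * Merging sketch for the write path below: consecutive calls to
 * submit_write_page() with block addresses N, N+1, N+2, ... keep
 * appending pages to sbi->bio[type]; the first non-contiguous address
 * (checked against last_block_in_bio) submits the pending bio and
 * starts a new one, so each bio covers one physically sequential run.
 */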
static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
				block_t blk_addr, enum page_type type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int bio_blocks;

	verify_block_addr(sbi, blk_addr);

	down_write(&sbi->bio_sem);

	inc_page_count(sbi, F2FS_WRITEBACK);

	if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1)
		do_submit_bio(sbi, type, false);
alloc_new:
	if (sbi->bio[type] == NULL) {
		struct bio_private *priv;
retry:
		priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
		if (!priv) {
			cond_resched();
			goto retry;
		}

		bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
		sbi->bio[type] = f2fs_bio_alloc(bdev, bio_blocks);
		sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
		sbi->bio[type]->bi_private = priv;
		/*
		 * The end_io will be assigned at the submission phase.
		 * Until then, let bio_add_page() merge consecutive IOs as much
		 * as possible.
		 */
	}

	if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		do_submit_bio(sbi, type, false);
		goto alloc_new;
	}

	sbi->last_block_in_bio[type] = blk_addr;

	up_write(&sbi->bio_sem);
	trace_f2fs_submit_write_page(page, blk_addr, type);
}

void f2fs_wait_on_page_writeback(struct page *page,
				enum page_type type, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	if (PageWriteback(page)) {
		f2fs_submit_bio(sbi, type, sync);
		wait_on_page_writeback(page);
	}
}

static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	if (curseg->next_blkoff < sbi->blocks_per_seg)
		return true;
	return false;
}

static int __get_segment_type_2(struct page *page, enum page_type p_type)
{
	if (p_type == DATA)
		return CURSEG_HOT_DATA;
	else
		return CURSEG_HOT_NODE;
}

static int __get_segment_type_4(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else
			return CURSEG_COLD_DATA;
	} else {
		if (IS_DNODE(page) && !is_cold_node(page))
			return CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type_6(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else if (is_cold_data(page) || file_is_cold(inode))
			return CURSEG_COLD_DATA;
		else
			return CURSEG_WARM_DATA;
	} else {
		if (IS_DNODE(page))
			return is_cold_node(page) ? CURSEG_WARM_NODE :
						CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type(struct page *page, enum page_type p_type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	switch (sbi->active_logs) {
	case 2:
		return __get_segment_type_2(page, p_type);
	case 4:
		return __get_segment_type_4(page, p_type);
	}
	/* NR_CURSEG_TYPE(6) logs by default */
	BUG_ON(sbi->active_logs != NR_CURSEG_TYPE);
	return __get_segment_type_6(page, p_type);
}
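/*
 * Temperature mapping with six logs above (sketch): directory data goes
 * to the hot data log; cold-marked pages and data of cold files go to
 * the cold data log; other file data is warm. Direct node blocks are
 * hot (or warm when the node is marked cold), and indirect node blocks
 * are always cold.
 */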
static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, enum page_type p_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int old_cursegno;
	int type;

	type = __get_segment_type(page, p_type);
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);

	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
	old_cursegno = curseg->segno;

	/*
	 * __add_sum_entry must be called with curseg_mutex held,
	 * because this function updates a summary entry in the
	 * current summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	mutex_lock(&sit_i->sentry_lock);
	__refresh_next_blkoff(sbi, curseg);

	stat_inc_block_count(sbi, curseg);

	/*
	 * SIT information should be updated before segment allocation,
	 * since SSR needs the latest valid block information.
	 */
	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

	if (!__has_curseg_space(sbi, type))
		sit_i->s_ops->allocate_segment(sbi, type, false);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
	mutex_unlock(&sit_i->sentry_lock);

	if (p_type == NODE)
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

	/* writeout dirty page into bdev */
	submit_write_page(sbi, page, *new_blkaddr, p_type);

	mutex_unlock(&curseg->curseg_mutex);
}

void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
	set_page_writeback(page);
	submit_write_page(sbi, page, page->index, META);
}

void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
		unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
{
	struct f2fs_summary sum;
	set_summary(&sum, nid, 0, 0);
	do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, NODE);
}

void write_data_page(struct inode *inode, struct page *page,
		struct dnode_of_data *dn, block_t old_blkaddr,
		block_t *new_blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_summary sum;
	struct node_info ni;

	BUG_ON(old_blkaddr == NULL_ADDR);
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	do_write_page(sbi, page, old_blkaddr,
			new_blkaddr, &sum, DATA);
}

void rewrite_data_page(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blk_addr)
{
	submit_write_page(sbi, page, old_blk_addr, DATA);
}

void recover_data_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	struct seg_entry *se;
	int type;

	segno = GET_SEGNO(sbi, new_blkaddr);
	se = get_seg_entry(sbi, segno);
	type = se->type;

	if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
		if (old_blkaddr == NULL_ADDR)
			type = CURSEG_COLD_DATA;
		else
			type = CURSEG_WARM_DATA;
	}
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}

	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
					(sbi->blocks_per_seg - 1);
	__add_sum_entry(sbi, type, sum);

	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}
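/*
 * The recovery helpers recover_data_page() above and rewrite_node_page()
 * below reuse change_curseg() to point a current log at an arbitrary
 * existing segment and rewrite a single block in place. A call sketch
 * with hypothetical arguments:
 *
 *	set_summary(&sum, nid, ofs_in_node, version);
 *	recover_data_page(sbi, page, &sum, old_blkaddr, new_blkaddr);
 */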
void rewrite_node_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int type = CURSEG_WARM_NODE;
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	block_t next_blkaddr = next_blkaddr_of_node(page);
	unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	segno = GET_SEGNO(sbi, new_blkaddr);
	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}
	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
					(sbi->blocks_per_seg - 1);
	__add_sum_entry(sbi, type, sum);

	/* change the current log to the next block addr in advance */
	if (next_segno != segno) {
		curseg->next_segno = next_segno;
		change_curseg(sbi, type, true);
	}
	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) &
					(sbi->blocks_per_seg - 1);

	/* rewrite node page */
	set_page_writeback(page);
	submit_write_page(sbi, page, new_blkaddr, NODE);
	f2fs_submit_bio(sbi, NODE, true);
	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *seg_i;
	unsigned char *kaddr;
	struct page *page;
	block_t start;
	int i, j, offset;

	start = start_sum_block(sbi);

	page = get_meta_page(sbi, start++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: restore nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);

	/* Step 2: restore sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
						SUM_JOURNAL_SIZE);
	offset = 2 * SUM_JOURNAL_SIZE;

	/* Step 3: restore summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		unsigned int segno;

		seg_i = CURSEG_I(sbi, i);
		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
		seg_i->next_segno = segno;
		reset_curseg(sbi, i, 0);
		seg_i->alloc_type = ckpt->alloc_type[i];
		seg_i->next_blkoff = blk_off;

		if (seg_i->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;
			s = (struct f2fs_summary *)(kaddr + offset);
			seg_i->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
						SUM_FOOTER_SIZE)
				continue;

			f2fs_put_page(page, 1);
			page = NULL;

			page = get_meta_page(sbi, start++);
			kaddr = (unsigned char *)page_address(page);
			offset = 0;
		}
	}
	f2fs_put_page(page, 1);
	return 0;
}
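/*
 * On-disk layout consumed above (sketch): the first compacted summary
 * page holds the NAT journal, then the SIT journal, then the hot/warm/
 * cold data summary entries packed back to back; entries spill onto the
 * following page whenever the next one would overlap the per-page
 * footer area.
 */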
static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum;
	struct curseg_info *curseg;
	struct page *new;
	unsigned short blk_off;
	unsigned int segno = 0;
	block_t blk_addr = 0;

	/* get segment number and block addr */
	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
							CURSEG_HOT_DATA]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
							CURSEG_HOT_NODE]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLOCK(sbi, segno);
	}

	new = get_meta_page(sbi, blk_addr);
	sum = (struct f2fs_summary_block *)page_address(new);

	if (IS_NODESEG(type)) {
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
			struct f2fs_summary *ns = &sum->entries[0];
			int i;
			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
				ns->version = 0;
				ns->ofs_in_node = 0;
			}
		} else {
			if (restore_node_summary(sbi, segno, sum)) {
				f2fs_put_page(new, 1);
				return -EINVAL;
			}
		}
	}

	/* set uncompleted segment to curseg */
	curseg = CURSEG_I(sbi, type);
	mutex_lock(&curseg->curseg_mutex);
	memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 0);
	curseg->alloc_type = ckpt->alloc_type[type];
	curseg->next_blkoff = blk_off;
	mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(new, 1);
	return 0;
}

static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;

	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
		/* restore for compacted data summary */
		if (read_compacted_summaries(sbi))
			return -EINVAL;
		type = CURSEG_HOT_NODE;
	}

	for (; type <= CURSEG_COLD_NODE; type++)
		if (read_normal_summaries(sbi, type))
			return -EINVAL;
	return 0;
}

static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
						SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	set_page_dirty(page);

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blkoff;
		seg_i = CURSEG_I(sbi, i);
		if (sbi->ckpt->alloc_type[i] == SSR)
			blkoff = sbi->blocks_per_seg;
		else
			blkoff = curseg_blkoff(sbi, i);

		for (j = 0; j < blkoff; j++) {
			if (!page) {
				page = grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;
			set_page_dirty(page);

			if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
							SUM_FOOTER_SIZE)
				continue;

			f2fs_put_page(page, 1);
			page = NULL;
		}
	}
	if (page)
		f2fs_put_page(page, 1);
}
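/*
 * Note (sketch): node summaries are persisted only in a checkpoint taken
 * with CP_UMOUNT_FLAG (see write_node_summaries() below); after a regular
 * checkpoint, read_normal_summaries() instead reads the SSA block via
 * GET_SUM_BLOCK() and rebuilds the entries with restore_node_summary().
 */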
static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;
	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

	for (i = type; i < end; i++) {
		struct curseg_info *sum = CURSEG_I(sbi, i);
		mutex_lock(&sum->curseg_mutex);
		write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
		mutex_unlock(&sum->curseg_mutex);
	}
}

void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}

void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}

int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
					unsigned int val, int alloc)
{
	int i;

	if (type == NAT_JOURNAL) {
		for (i = 0; i < nats_in_cursum(sum); i++) {
			if (le32_to_cpu(nid_in_journal(sum, i)) == val)
				return i;
		}
		if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
			return update_nats_in_cursum(sum, 1);
	} else if (type == SIT_JOURNAL) {
		for (i = 0; i < sits_in_cursum(sum); i++)
			if (le32_to_cpu(segno_in_journal(sum, i)) == val)
				return i;
		if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
			return update_sits_in_cursum(sum, 1);
	}
	return -1;
}

static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, segno);

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return get_meta_page(sbi, blk_addr);
}

static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
					unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct page *src_page, *dst_page;
	pgoff_t src_off, dst_off;
	void *src_addr, *dst_addr;

	src_off = current_sit_addr(sbi, start);
	dst_off = next_sit_addr(sbi, src_off);

	/* get current sit block page without lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	BUG_ON(PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);

	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_sit(sit_i, start);

	return dst_page;
}
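/*
 * SIT ping-pong sketch: each SIT block has two on-disk copies, and
 * sit_bitmap records which copy is current per block. get_next_sit_page()
 * copies the current block into the other slot and flips the bit, so an
 * interrupted checkpoint still leaves the previous consistent copy
 * intact.
 */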
static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	/*
	 * If the journal area in the current summary is full of sit entries,
	 * all the sit entries will be flushed. Otherwise the sit entries
	 * cannot be replaced with newly hot sit entries.
	 */
	if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
		for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
			unsigned int segno;
			segno = le32_to_cpu(segno_in_journal(sum, i));
			__mark_sit_entry_dirty(sbi, segno);
		}
		update_sits_in_cursum(sum, -sits_in_cursum(sum));
		return true;
	}
	return false;
}

/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void flush_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	unsigned long nsegs = TOTAL_SEGS(sbi);
	struct page *page = NULL;
	struct f2fs_sit_block *raw_sit = NULL;
	unsigned int start = 0, end = 0;
	unsigned int segno = -1;
	bool flushed;

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	/*
	 * "flushed" indicates whether sit entries in journal are flushed
	 * to the SIT area or not.
	 */
	flushed = flush_sits_in_journal(sbi);

	while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) {
		struct seg_entry *se = get_seg_entry(sbi, segno);
		int sit_offset, offset;

		sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);

		if (flushed)
			goto to_sit_page;

		offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
		if (offset >= 0) {
			segno_in_journal(sum, offset) = cpu_to_le32(segno);
			seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
			goto flush_done;
		}
to_sit_page:
		if (!page || (start > segno) || (segno > end)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}

			start = START_SEGNO(sit_i, segno);
			end = start + SIT_ENTRY_PER_BLOCK - 1;

			/* read sit block that will be updated */
			page = get_next_sit_page(sbi, start);
			raw_sit = page_address(page);
		}

		/* update entry in SIT block */
		seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
flush_done:
		__clear_bit(segno, bitmap);
		sit_i->dirty_sentries--;
	}
	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);

	/* writeout last modified SIT block */
	f2fs_put_page(page, 1);

	set_prefree_as_free_segments(sbi);
}
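/*
 * Flush strategy above, in short (sketch): while the SIT journal in the
 * cold-data summary has room, a dirty entry is recorded there instead of
 * rewriting a whole SIT block; once the journal fills up,
 * flush_sits_in_journal() marks every journaled segment dirty so the
 * main loop writes them all through the SIT blocks proper.
 */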
static int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap, *dst_bitmap;
	unsigned int bitmap_size;

	/* allocate memory for SIT information */
	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
	if (!sit_i->sentries)
		return -ENOMEM;

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
	sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!sit_i->dirty_sentries_bitmap)
		return -ENOMEM;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		sit_i->sentries[start].ckpt_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map
				|| !sit_i->sentries[start].ckpt_valid_map)
			return -ENOMEM;
	}

	if (sbi->segs_per_sec > 1) {
		sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
					sizeof(struct sec_entry));
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related to SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from checkpoint pack */
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!dst_bitmap)
		return -ENOMEM;

	/* init SIT information */
	sit_i->s_ops = &default_salloc_ops;

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
	sit_i->sit_bitmap = dst_bitmap;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
	mutex_init(&sit_i->sentry_lock);
	return 0;
}

static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
	free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
	free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno =
		(unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr);
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	rwlock_init(&free_i->segmap_lock);
	return 0;
}

static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
	}
	return restore_curseg_summaries(sbi);
}
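/*
 * Mount-time note (sketch): build_sit_entries() below prefers an entry
 * found in the SIT journal of the cold-data summary over the on-disk SIT
 * block, since journaled entries are newer than the last fully written
 * SIT blocks.
 */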
static void build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	unsigned int start;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		struct seg_entry *se = &sit_i->sentries[start];
		struct f2fs_sit_block *sit_blk;
		struct f2fs_sit_entry sit;
		struct page *page;
		int i;

		mutex_lock(&curseg->curseg_mutex);
		for (i = 0; i < sits_in_cursum(sum); i++) {
			if (le32_to_cpu(segno_in_journal(sum, i)) == start) {
				sit = sit_in_journal(sum, i);
				mutex_unlock(&curseg->curseg_mutex);
				goto got_it;
			}
		}
		mutex_unlock(&curseg->curseg_mutex);
		page = get_current_sit_page(sbi, start);
		sit_blk = (struct f2fs_sit_block *)page_address(page);
		sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
		f2fs_put_page(page, 1);
got_it:
		check_block_count(sbi, start, &sit);
		seg_info_from_raw_sit(se, &sit);
		if (sbi->segs_per_sec > 1) {
			struct sec_entry *e = get_sec_entry(sbi, start);
			e->valid_blocks += se->valid_blocks;
		}
	}
}

static void init_free_segmap(struct f2fs_sb_info *sbi)
{
	unsigned int start;
	int type;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		struct seg_entry *sentry = get_seg_entry(sbi, start);
		if (!sentry->valid_blocks)
			__set_free(sbi, start);
	}

	/* mark the current segments as in use */
	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
		__set_test_and_inuse(sbi, curseg_t->segno);
	}
}

static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi);
	unsigned short valid_blocks;

	while (1) {
		/* find dirty segment based on free segmap */
		segno = find_next_inuse(free_i, total_segs, offset);
		if (segno >= total_segs)
			break;
		offset = segno + 1;
		valid_blocks = get_valid_blocks(sbi, segno, 0);
		if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks)
			continue;
		mutex_lock(&dirty_i->seglist_lock);
		__locate_dirty_segment(sbi, segno, DIRTY);
		mutex_unlock(&dirty_i->seglist_lock);
	}
}

static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));

	dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dirty_i->victim_secmap)
		return -ENOMEM;
	return 0;
}

static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i;
	unsigned int bitmap_size, i;

	/* allocate memory for dirty segments list information */
	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
	if (!dirty_i)
		return -ENOMEM;

	SM_I(sbi)->dirty_info = dirty_i;
	mutex_init(&dirty_i->seglist_lock);

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));

	for (i = 0; i < NR_DIRTY_TYPE; i++) {
		dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
		if (!dirty_i->dirty_segmap[i])
			return -ENOMEM;
	}

	init_dirty_segmap(sbi);
	return init_victim_secmap(sbi);
}
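/*
 * Example for init_min_max_mtime() below (a sketch, assuming 4 segments
 * per section): segment mtimes {10, 20, 30, 40} yield a section average
 * of 25; min_mtime ends up as the smallest such average, and together
 * with max_mtime it gives the GC an age range for normalizing segment
 * ages.
 */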
/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	mutex_lock(&sit_i->sentry_lock);

	sit_i->min_mtime = LLONG_MAX;

	for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < sbi->segs_per_sec; i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, sbi->segs_per_sec);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi);
	mutex_unlock(&sit_i->sentry_lock);
}

int build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	INIT_LIST_HEAD(&sm_info->wblist_head);
	spin_lock_init(&sm_info->wblist_lock);
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = DEF_RECLAIM_PREFREE_SEGMENTS;

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	build_sit_entries(sbi);

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}

static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	kfree(dirty_i->victim_secmap);
}

static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}

static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		kfree(array[i].sum_blk);
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kfree(free_i->free_segmap);
	kfree(free_i->free_secmap);
	kfree(free_i);
}
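/*
 * Teardown note: destroy_segment_manager() below releases the structures
 * in the reverse order of build_segment_manager(), and each destroy_*
 * helper tolerates a missing table, so a partially failed build can be
 * unwound safely.
 */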
static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int start;

	if (!sit_i)
		return;

	if (sit_i->sentries) {
		for (start = 0; start < TOTAL_SEGS(sbi); start++) {
			kfree(sit_i->sentries[start].cur_valid_map);
			kfree(sit_i->sentries[start].ckpt_valid_map);
		}
	}
	vfree(sit_i->sentries);
	vfree(sit_i->sec_entries);
	kfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kfree(sit_i->sit_bitmap);
	kfree(sit_i);
}

void destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}