// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode;

		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze()) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently.
		 * Because it is possible that some segments can be
		 * invalidated soon after by user update or deletion.
		 * So, I'd like to wait some time to collect dirty segments.
		 */
		if (sbi->gc_mode == GC_URGENT) {
			wait_ms = gc_th->urgent_sleep_time;
			down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (!down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_bggc_count(sbi->stat_info);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, sync_mode, true, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}
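
/*
 * The four sleep times used by the GC thread are runtime tunables: gc.h
 * provides the defaults (at the time of writing: 500ms urgent, 30s min,
 * 60s max, 300s no-GC), and they can be adjusted through sysfs knobs such
 * as /sys/fs/f2fs/<dev>/gc_min_sleep_time. The thread oscillates between
 * min and max depending on device idleness and the amount of invalid
 * blocks, and backs off to no_gc_sleep_time whenever no victim could be
 * selected.
 */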
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kvfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kvfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT:
		gc_mode = GC_GREEDY;
		break;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = sbi->segs_per_sec;
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * adjust the candidate range: foreground GC and urgent GC should
	 * select among all dirty segments.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}
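
/*
 * Victim selection works by minimizing a "cost" value: for GC_GREEDY the
 * cost is simply the number of valid blocks that would have to be migrated,
 * while for GC_CB it is UINT_MAX minus the cost-benefit score computed in
 * get_cb_cost() below. get_max_cost() returns the upper bound used to
 * initialize p->min_cost before the search.
 */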
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select the victim segments
	 * previously chosen by background GC.
	 * Those segments are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
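
/*
 * The formula above follows the classic LFS cost-benefit policy: with
 * utilization u (in percent) and a normalized age, the benefit of cleaning
 * a section is proportional to age * (100 - u) and the cost to (100 + u)
 * (read the whole section, write back the live part). For example, a cold
 * section with u = 10 and age = 80 scores 100 * 90 * 80 / 110 ~= 6545,
 * while a hot one with u = 50 and age = 10 scores 100 * 50 * 10 / 150
 * ~= 333; subtracting the score from UINT_MAX makes the better candidate
 * the one with the smaller cost, matching the minimization done in
 * get_victim_by_default().
 */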
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (*result != NULL_SEGNO) {
		if (get_valid_blocks(sbi, *result, false) &&
			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			p.min_segno = *result;
		goto out;
	}

	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}
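
	/*
	 * Linear scan over the dirty bitmap, in units of p.ofs_unit segments
	 * (one section in LFS mode). p.offset remembers where the previous
	 * search stopped, so successive calls resume instead of rescanning;
	 * when the scan hits the end of the main area it wraps around once
	 * via sm->last_victim[]. The loop keeps the cheapest candidate seen
	 * and gives up after p.max_search probes.
	 */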
	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip segnos that previously failed the block validity
		 * check during GC, to avoid an endless GC loop in such
		 * cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
					get_ckpt_valid_blocks(sbi, segno) &&
					p.alloc_mode != SSR))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * sbi->segs_per_sec);
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}

	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}
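
/*
 * Both gc_node_segment() below and gc_data_segment() further down walk the
 * victim's summary entries several times, using the early "phase" passes
 * purely to issue readahead (NAT blocks first, then node pages) before the
 * final pass actually migrates blocks. This keeps the migration pass mostly
 * hitting cached pages instead of issuing synchronous reads.
 */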

/*
 * This function compares the node address recorded in the summary with
 * that in the NAT. If valid, the node is migrated with cold status;
 * otherwise (a stale node) it is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

/*
 * Calculate the start block index corresponding to the given node offset.
 * Note that callers must pass only node offsets of direct node blocks;
 * passing the offset of an indirect or double-indirect node block is a
 * caller's bug.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}
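
/*
 * Worked example, assuming the common 4KB-block geometry where an inode
 * holds 923 data pointers (DEF_ADDRS_PER_INODE) and each direct node holds
 * 1018 (ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018): node_ofs 1 and 2 are
 * the two direct nodes, giving start indices 923 and 923 + 1018. node_ofs
 * 3 is the first indirect node itself; its first child direct node has
 * node_ofs 4, for which dec = 0 and bidx = 2, i.e. a start index of
 * 923 + 2 * 1018. The "dec" term simply discounts the indirect and
 * double-indirect node blocks themselves, which hold no data addresses.
 */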

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u\n",
						blkaddr, source_blkaddr, segno);
				f2fs_bug_on(sbi, 1);
			}
		}
#endif
		return false;
	}
	return true;
}
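
/*
 * Read one data block ahead into the meta inode's address space. This is
 * the readahead counterpart of move_data_block() below and is used for
 * inodes that need post-read processing (e.g. encryption), where GC copies
 * raw blocks instead of going through the regular data path.
 */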
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data into the meta inode until previously
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}
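
/*
 * For inodes that require post-read processing (e.g. encrypted files), GC
 * does not decrypt and re-encrypt data. move_data_block() below instead
 * copies the on-disk block verbatim through the meta inode's page cache:
 * read the old block into META_MAPPING, allocate a new block address, and
 * write the same bytes back out.
 */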

/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}

	if (f2fs_is_pinned_file(inode)) {
		f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until previously
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto put_out;

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage)
		goto up_out;

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, CURSEG_COLD_DATA, NULL);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);
	invalidate_mapping_pages(META_MAPPING(fio.sbi),
				fio.old_blkaddr, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		err = -EAGAIN;
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
up_out:
	if (lfs_mode)
		up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}
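
/*
 * Plain (non-post-read) data takes the simpler path below: background GC
 * just redirties the page with the cold hint so that regular writeback
 * moves it, while foreground GC writes the page out synchronously right
 * here.
 */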
static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_cold_data(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}
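
/*
 * gc_data_segment() makes five passes over the victim's summary block:
 * phase 0 reads ahead the NAT blocks of every live entry, phase 1 reads
 * ahead their dnode pages, phase 2 reads ahead the owning inodes' node
 * pages, phase 3 grabs inode references and brings the data pages in, and
 * phase 4 finally migrates the blocks via move_data_block() or
 * move_data_page().
 */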
/*
 * This function looks up the parent node of each victim data block and
 * checks the block's validity. If the block is valid, it is migrated with
 * cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs, the
 * victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * stop BG_GC if there are not enough free sections.
		 * Or, stop GC if the segment became fully valid due to
		 * a race with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
				get_valid_blocks(sbi, segno, true) ==
							BLKS_PER_SEC(sbi))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode)) {
				set_sbi_flag(sbi, SBI_NEED_FSCK);
				continue;
			}

			if (!down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode,
						start_bidx, REQ_RAHEAD, true);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				up_write(&fi->i_gc_rwsem[WRITE]);
				up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	up_write(&sit_i->sentry_lock);
	return ret;
}
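
/*
 * do_garbage_collect() always works on a whole section: it walks every
 * segment of the victim section, and in large-section mode background GC
 * is additionally throttled by sbi->migration_granularity so that a single
 * round cannot migrate too many segments' worth of IO at once.
 */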
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	int submitted = 0;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, sbi->segs_per_sec);

	/* readahead multiple SSA blocks, which have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				/*
				 * put twice: once for the reference taken by
				 * find_get_page() here, once for the one
				 * taken by f2fs_get_sum_page() above.
				 */
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_stop_checkpoint(sbi, false);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type);

		stat_inc_seg_count(sbi, type, gc_type);
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi) && segno + 1 < end_segno)
			sbi->next_victim_seg[gc_type] = segno + 1;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}
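
/*
 * Note the locking contract: every caller of f2fs_gc() must already hold
 * sbi->gc_lock for writing (gc_thread_func() above takes it; other callers
 * such as f2fs_balance_fs() and the GC ioctls do the same), and f2fs_gc()
 * releases it on all exit paths via the up_write() near the end.
 */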
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
	unsigned long long first_skipped;
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
	sbi->skipped_gc_rwsem = 0;
	first_skipped = last_skipped;
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * If there are many prefree segments below the given
		 * threshold, a checkpoint can reclaim them as free
		 * segments, so foreground GC may no longer be needed.
		 */
		if (prefree_segments(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	if (!__get_victim(sbi, &segno, gc_type)) {
		ret = -ENODATA;
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC) {
		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
						sbi->skipped_gc_rwsem)
			skipped_round++;
		last_skipped = sbi->skipped_atomic_files[FG_GC];
		round++;
	}

	if (gc_type == FG_GC && seg_freed)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (sync)
		goto stop;

	if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
		if (skipped_round <= MAX_SKIP_GC_COUNT ||
					skipped_round * 2 < round) {
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (first_skipped < last_skipped &&
				(last_skipped - first_skipped) >
					sbi->skipped_gc_rwsem) {
			f2fs_drop_inmem_pages_all(sbi, true);
			segno = NULL_SEGNO;
			goto gc_more;
		}
		if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
			ret = f2fs_write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	up_write(&sbi->gc_lock);

	put_gc_inode(&gc_list);

	if (sync && !ret)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}

static int free_segment_range(struct f2fs_sb_info *sbi,
				unsigned int secs, bool gc_only)
{
	unsigned int segno, next_inuse, start, end;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	int gc_mode, gc_type;
	int err = 0;
	int type;

	/* Force block allocation for GC */
	MAIN_SECS(sbi) -= secs;
	start = MAIN_SECS(sbi) * sbi->segs_per_sec;
	end = MAIN_SEGS(sbi) - 1;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >= start)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	/* Move out cursegs from the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_TYPE; type++)
		f2fs_allocate_segment_for_resize(sbi, type, start, end);

	/* do GC to move out valid blocks in the range */
	for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		do_garbage_collect(sbi, segno, &gc_list, FG_GC);
		put_gc_inode(&gc_list);

		if (!gc_only && get_valid_blocks(sbi, segno, true)) {
			err = -EAGAIN;
			goto out;
		}
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
	if (gc_only)
		goto out;

	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out;

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still inuse!",
			 next_inuse);
		f2fs_bug_on(sbi, 1);
	}
out:
	MAIN_SECS(sbi) += secs;
	return err;
}

static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int section_count;
	int segment_count;
	int segment_count_main;
	long long block_count;
	int segs = secs * sbi->segs_per_sec;

	down_write(&sbi->sb_lock);

	section_count = le32_to_cpu(raw_sb->section_count);
	segment_count = le32_to_cpu(raw_sb->segment_count);
	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	block_count = le64_to_cpu(raw_sb->block_count);

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
					(long long)segs * sbi->blocks_per_seg);
	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		int dev_segs =
			le32_to_cpu(raw_sb->devs[last_dev].total_segments);

		raw_sb->devs[last_dev].total_segments =
						cpu_to_le32(dev_segs + segs);
	}

	up_write(&sbi->sb_lock);
}
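
/*
 * Both update_sb_metadata() and update_fs_metadata() take a signed section
 * count, so f2fs_resize_fs() below can call them with -secs to shrink and
 * then with +secs to undo the shrink if a later step fails.
 */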
static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * sbi->segs_per_sec;
	long long blks = (long long)segs * sbi->blocks_per_seg;
	long long user_block_count =
			le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	MAIN_SECS(sbi) += secs;
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;

		FDEV(last_dev).total_segments =
				(int)FDEV(last_dev).total_segments + segs;
		FDEV(last_dev).end_blk =
				(long long)FDEV(last_dev).end_blk + blks;
#ifdef CONFIG_BLK_DEV_ZONED
		FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
					(int)(blks >> sbi->log_blocks_per_blkz);
#endif
	}
}

int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
{
	__u64 old_block_count, shrunk_blocks;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	unsigned int secs;
	int err = 0;
	__u32 rem;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)
		return -EINVAL;

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		__u64 last_segs = FDEV(last_dev).total_segments;

		if (block_count + last_segs * sbi->blocks_per_seg <=
								old_block_count)
			return -EINVAL;
	}

	/* new fs size should align to section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

	/* stop other GC */
	if (!down_write_trylock(&sbi->gc_lock))
		return -EAGAIN;

	/* stop CP to protect MAIN_SEC in free_segment_range */
	f2fs_lock_op(sbi);
	err = free_segment_range(sbi, secs, true);
	f2fs_unlock_op(sbi);
	up_write(&sbi->gc_lock);
	if (err)
		return err;

	set_sbi_flag(sbi, SBI_IS_RESIZEFS);

	freeze_super(sbi->sb);
	down_write(&sbi->gc_lock);
	mutex_lock(&sbi->cp_mutex);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err)
		goto out_err;

	err = free_segment_range(sbi, secs, false);
	if (err)
		goto recover_out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto recover_out;
	}

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);
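
	/*
	 * Persist the shrunk geometry with a checkpoint. If it fails, walk
	 * the updates back in reverse order: restore the in-memory metadata,
	 * restore the superblock and re-commit it, then flag the filesystem
	 * for fsck below.
	 */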
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err) {
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
recover_out:
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
out_err:
	mutex_unlock(&sbi->cp_mutex);
	up_write(&sbi->gc_lock);
	thaw_super(sbi->sb);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	return err;
}