gc.c (e5451c8f8330e03ad3cfa16048b4daf961af434f) gc.c (4356e48e64374ceac6e4313244eb65158a954b40)
1/*
2 * fs/f2fs/gc.c
3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as

--- 231 unchanged lines hidden (view full) ---

240
241 /* alloc_mode == LFS */
242 if (p->gc_mode == GC_GREEDY)
243 return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
244 else
245 return get_cb_cost(sbi, segno);
246}
247
1/*
2 * fs/f2fs/gc.c
3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as

--- 231 unchanged lines hidden (view full) ---

240
241 /* alloc_mode == LFS */
242 if (p->gc_mode == GC_GREEDY)
243 return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
244 else
245 return get_cb_cost(sbi, segno);
246}
247
/*
 * Return how many bits are set in @addr over the range
 * [@offset, @offset + @len). Callers use this to account for how many
 * segments a victim-search step actually covered in a segment bitmap.
 */
static unsigned int count_bits(const unsigned long *addr,
			unsigned int offset, unsigned int len)
{
	unsigned int pos, sum = 0;

	for (pos = offset; pos < offset + len; pos++)
		if (test_bit(pos, addr))
			sum++;

	return sum;
}
259
248/*
249 * This function is called from two paths.
250 * One is garbage collection and the other is SSR segment selection.
251 * When it is called during GC, it just gets a victim segment
252 * and it does not remove it from dirty seglist.
253 * When it is called from SSR segment selection, it finds a segment
254 * which has minimum valid blocks and removes it from dirty seglist.
255 */
256static int get_victim_by_default(struct f2fs_sb_info *sbi,
257 unsigned int *result, int gc_type, int type, char alloc_mode)
258{
259 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
260 struct victim_sel_policy p;
260/*
261 * This function is called from two paths.
262 * One is garbage collection and the other is SSR segment selection.
263 * When it is called during GC, it just gets a victim segment
264 * and it does not remove it from dirty seglist.
265 * When it is called from SSR segment selection, it finds a segment
266 * which has minimum valid blocks and removes it from dirty seglist.
267 */
268static int get_victim_by_default(struct f2fs_sb_info *sbi,
269 unsigned int *result, int gc_type, int type, char alloc_mode)
270{
271 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
272 struct victim_sel_policy p;
261 unsigned int secno, max_cost;
273 unsigned int secno, max_cost, last_victim;
262 unsigned int last_segment = MAIN_SEGS(sbi);
274 unsigned int last_segment = MAIN_SEGS(sbi);
263 int nsearched = 0;
275 unsigned int nsearched = 0;
264
265 mutex_lock(&dirty_i->seglist_lock);
266
267 p.alloc_mode = alloc_mode;
268 select_policy(sbi, gc_type, type, &p);
269
270 p.min_segno = NULL_SEGNO;
271 p.min_cost = max_cost = get_max_cost(sbi, &p);
272
273 if (p.max_search == 0)
274 goto out;
275
276
277 mutex_lock(&dirty_i->seglist_lock);
278
279 p.alloc_mode = alloc_mode;
280 select_policy(sbi, gc_type, type, &p);
281
282 p.min_segno = NULL_SEGNO;
283 p.min_cost = max_cost = get_max_cost(sbi, &p);
284
285 if (p.max_search == 0)
286 goto out;
287
288 last_victim = sbi->last_victim[p.gc_mode];
276 if (p.alloc_mode == LFS && gc_type == FG_GC) {
277 p.min_segno = check_bg_victims(sbi);
278 if (p.min_segno != NULL_SEGNO)
279 goto got_it;
280 }
281
282 while (1) {
283 unsigned long cost;

--- 6 unchanged lines hidden (view full) ---

290 sbi->last_victim[p.gc_mode] = 0;
291 p.offset = 0;
292 continue;
293 }
294 break;
295 }
296
297 p.offset = segno + p.ofs_unit;
289 if (p.alloc_mode == LFS && gc_type == FG_GC) {
290 p.min_segno = check_bg_victims(sbi);
291 if (p.min_segno != NULL_SEGNO)
292 goto got_it;
293 }
294
295 while (1) {
296 unsigned long cost;

--- 6 unchanged lines hidden (view full) ---

303 sbi->last_victim[p.gc_mode] = 0;
304 p.offset = 0;
305 continue;
306 }
307 break;
308 }
309
310 p.offset = segno + p.ofs_unit;
298 if (p.ofs_unit > 1)
311 if (p.ofs_unit > 1) {
299 p.offset -= segno % p.ofs_unit;
312 p.offset -= segno % p.ofs_unit;
313 nsearched += count_bits(p.dirty_segmap,
314 p.offset - p.ofs_unit,
315 p.ofs_unit);
316 } else {
317 nsearched++;
318 }
300
319
320
301 secno = GET_SECNO(sbi, segno);
302
303 if (sec_usage_check(sbi, secno))
321 secno = GET_SECNO(sbi, segno);
322
323 if (sec_usage_check(sbi, secno))
304 continue;
324 goto next;
305 if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
325 if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
306 continue;
326 goto next;
307
308 cost = get_gc_cost(sbi, segno, &p);
309
310 if (p.min_cost > cost) {
311 p.min_segno = segno;
312 p.min_cost = cost;
327
328 cost = get_gc_cost(sbi, segno, &p);
329
330 if (p.min_cost > cost) {
331 p.min_segno = segno;
332 p.min_cost = cost;
313 } else if (unlikely(cost == max_cost)) {
314 continue;
315 }
333 }
316
317 if (nsearched++ >= p.max_search) {
318 sbi->last_victim[p.gc_mode] = segno;
334next:
335 if (nsearched >= p.max_search) {
336 if (!sbi->last_victim[p.gc_mode] && segno <= last_victim)
337 sbi->last_victim[p.gc_mode] = last_victim + 1;
338 else
339 sbi->last_victim[p.gc_mode] = segno + 1;
319 break;
320 }
321 }
322 if (p.min_segno != NULL_SEGNO) {
323got_it:
324 if (p.alloc_mode == LFS) {
325 secno = GET_SECNO(sbi, p.min_segno);
326 if (gc_type == FG_GC)

--- 67 unchanged lines hidden (view full) ---

394 return ret;
395}
396
397/*
398 * This function compares node address got in summary with that in NAT.
399 * On validity, copy that node with cold status, otherwise (invalid node)
400 * ignore that.
401 */
340 break;
341 }
342 }
343 if (p.min_segno != NULL_SEGNO) {
344got_it:
345 if (p.alloc_mode == LFS) {
346 secno = GET_SECNO(sbi, p.min_segno);
347 if (gc_type == FG_GC)

--- 67 unchanged lines hidden (view full) ---

415 return ret;
416}
417
418/*
419 * This function compares node address got in summary with that in NAT.
420 * On validity, copy that node with cold status, otherwise (invalid node)
421 * ignore that.
422 */
402static int gc_node_segment(struct f2fs_sb_info *sbi,
423static void gc_node_segment(struct f2fs_sb_info *sbi,
403 struct f2fs_summary *sum, unsigned int segno, int gc_type)
404{
405 bool initial = true;
406 struct f2fs_summary *entry;
407 block_t start_addr;
408 int off;
409
410 start_addr = START_BLOCK(sbi, segno);
411
412next_step:
413 entry = sum;
414
415 for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
416 nid_t nid = le32_to_cpu(entry->nid);
417 struct page *node_page;
418 struct node_info ni;
419
420 /* stop BG_GC if there is not enough free sections. */
421 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
424 struct f2fs_summary *sum, unsigned int segno, int gc_type)
425{
426 bool initial = true;
427 struct f2fs_summary *entry;
428 block_t start_addr;
429 int off;
430
431 start_addr = START_BLOCK(sbi, segno);
432
433next_step:
434 entry = sum;
435
436 for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
437 nid_t nid = le32_to_cpu(entry->nid);
438 struct page *node_page;
439 struct node_info ni;
440
441 /* stop BG_GC if there is not enough free sections. */
442 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
422 return 0;
443 return;
423
424 if (check_valid_map(sbi, segno, off) == 0)
425 continue;
426
427 if (initial) {
428 ra_node_page(sbi, nid);
429 continue;
430 }

--- 10 unchanged lines hidden (view full) ---

441 get_node_info(sbi, nid, &ni);
442 if (ni.blk_addr != start_addr + off) {
443 f2fs_put_page(node_page, 1);
444 continue;
445 }
446
447 /* set page dirty and write it */
448 if (gc_type == FG_GC) {
444
445 if (check_valid_map(sbi, segno, off) == 0)
446 continue;
447
448 if (initial) {
449 ra_node_page(sbi, nid);
450 continue;
451 }

--- 10 unchanged lines hidden (view full) ---

462 get_node_info(sbi, nid, &ni);
463 if (ni.blk_addr != start_addr + off) {
464 f2fs_put_page(node_page, 1);
465 continue;
466 }
467
468 /* set page dirty and write it */
469 if (gc_type == FG_GC) {
449 f2fs_wait_on_page_writeback(node_page, NODE);
470 f2fs_wait_on_page_writeback(node_page, NODE, true);
450 set_page_dirty(node_page);
451 } else {
452 if (!PageWriteback(node_page))
453 set_page_dirty(node_page);
454 }
455 f2fs_put_page(node_page, 1);
456 stat_inc_node_blk_count(sbi, 1, gc_type);
457 }
458
459 if (initial) {
460 initial = false;
461 goto next_step;
462 }
471 set_page_dirty(node_page);
472 } else {
473 if (!PageWriteback(node_page))
474 set_page_dirty(node_page);
475 }
476 f2fs_put_page(node_page, 1);
477 stat_inc_node_blk_count(sbi, 1, gc_type);
478 }
479
480 if (initial) {
481 initial = false;
482 goto next_step;
483 }
463
464 if (gc_type == FG_GC) {
465 struct writeback_control wbc = {
466 .sync_mode = WB_SYNC_ALL,
467 .nr_to_write = LONG_MAX,
468 .for_reclaim = 0,
469 };
470 sync_node_pages(sbi, 0, &wbc);
471
472 /* return 1 only if FG_GC succefully reclaimed one */
473 if (get_valid_blocks(sbi, segno, 1) == 0)
474 return 1;
475 }
476 return 0;
477}
478
479/*
480 * Calculate start block index indicating the given node offset.
481 * Be careful, caller should give this node offset only indicating direct node
482 * blocks. If any node offsets, which point the other types of node blocks such
483 * as indirect or double indirect node blocks, are given, it must be a caller's
484 * bug.
485 */
484}
485
486/*
487 * Calculate start block index indicating the given node offset.
488 * Be careful, caller should give this node offset only indicating direct node
489 * blocks. If any node offsets, which point the other types of node blocks such
490 * as indirect or double indirect node blocks, are given, it must be a caller's
491 * bug.
492 */
486block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
493block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
487{
488 unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
489 unsigned int bidx;
490
491 if (node_ofs == 0)
492 return 0;
493
494 if (node_ofs <= 2) {
495 bidx = node_ofs - 1;
496 } else if (node_ofs <= indirect_blks) {
497 int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
498 bidx = node_ofs - 2 - dec;
499 } else {
500 int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
501 bidx = node_ofs - 5 - dec;
502 }
494{
495 unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
496 unsigned int bidx;
497
498 if (node_ofs == 0)
499 return 0;
500
501 if (node_ofs <= 2) {
502 bidx = node_ofs - 1;
503 } else if (node_ofs <= indirect_blks) {
504 int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
505 bidx = node_ofs - 2 - dec;
506 } else {
507 int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
508 bidx = node_ofs - 5 - dec;
509 }
503 return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
510 return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
504}
505
506static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
507 struct node_info *dni, block_t blkaddr, unsigned int *nofs)
508{
509 struct page *node_page;
510 nid_t nid;
511 unsigned int ofs_in_node;

--- 29 unchanged lines hidden (view full) ---

541 .type = DATA,
542 .rw = READ_SYNC,
543 .encrypted_page = NULL,
544 };
545 struct dnode_of_data dn;
546 struct f2fs_summary sum;
547 struct node_info ni;
548 struct page *page;
511}
512
513static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
514 struct node_info *dni, block_t blkaddr, unsigned int *nofs)
515{
516 struct page *node_page;
517 nid_t nid;
518 unsigned int ofs_in_node;

--- 29 unchanged lines hidden (view full) ---

548 .type = DATA,
549 .rw = READ_SYNC,
550 .encrypted_page = NULL,
551 };
552 struct dnode_of_data dn;
553 struct f2fs_summary sum;
554 struct node_info ni;
555 struct page *page;
556 block_t newaddr;
549 int err;
550
551 /* do not read out */
552 page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
553 if (!page)
554 return;
555
556 set_new_dnode(&dn, inode, NULL, NULL, 0);

--- 5 unchanged lines hidden (view full) ---

562 ClearPageUptodate(page);
563 goto put_out;
564 }
565
566 /*
567 * don't cache encrypted data into meta inode until previous dirty
568 * data were writebacked to avoid racing between GC and flush.
569 */
557 int err;
558
559 /* do not read out */
560 page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
561 if (!page)
562 return;
563
564 set_new_dnode(&dn, inode, NULL, NULL, 0);

--- 5 unchanged lines hidden (view full) ---

570 ClearPageUptodate(page);
571 goto put_out;
572 }
573
574 /*
575 * don't cache encrypted data into meta inode until previous dirty
576 * data were writebacked to avoid racing between GC and flush.
577 */
570 f2fs_wait_on_page_writeback(page, DATA);
578 f2fs_wait_on_page_writeback(page, DATA, true);
571
572 get_node_info(fio.sbi, dn.nid, &ni);
573 set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
574
575 /* read page */
576 fio.page = page;
579
580 get_node_info(fio.sbi, dn.nid, &ni);
581 set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
582
583 /* read page */
584 fio.page = page;
577 fio.blk_addr = dn.data_blkaddr;
585 fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
578
586
579 fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi),
580 fio.blk_addr,
581 FGP_LOCK|FGP_CREAT,
582 GFP_NOFS);
583 if (!fio.encrypted_page)
584 goto put_out;
587 allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
588 &sum, CURSEG_COLD_DATA);
585
589
590 fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
591 FGP_LOCK | FGP_CREAT, GFP_NOFS);
592 if (!fio.encrypted_page) {
593 err = -ENOMEM;
594 goto recover_block;
595 }
596
586 err = f2fs_submit_page_bio(&fio);
587 if (err)
588 goto put_page_out;
589
590 /* write page */
591 lock_page(fio.encrypted_page);
592
597 err = f2fs_submit_page_bio(&fio);
598 if (err)
599 goto put_page_out;
600
601 /* write page */
602 lock_page(fio.encrypted_page);
603
593 if (unlikely(!PageUptodate(fio.encrypted_page)))
604 if (unlikely(!PageUptodate(fio.encrypted_page))) {
605 err = -EIO;
594 goto put_page_out;
606 goto put_page_out;
595 if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
607 }
608 if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
609 err = -EIO;
596 goto put_page_out;
610 goto put_page_out;
611 }
597
598 set_page_dirty(fio.encrypted_page);
612
613 set_page_dirty(fio.encrypted_page);
599 f2fs_wait_on_page_writeback(fio.encrypted_page, DATA);
614 f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
600 if (clear_page_dirty_for_io(fio.encrypted_page))
601 dec_page_count(fio.sbi, F2FS_DIRTY_META);
602
603 set_page_writeback(fio.encrypted_page);
604
605 /* allocate block address */
615 if (clear_page_dirty_for_io(fio.encrypted_page))
616 dec_page_count(fio.sbi, F2FS_DIRTY_META);
617
618 set_page_writeback(fio.encrypted_page);
619
620 /* allocate block address */
606 f2fs_wait_on_page_writeback(dn.node_page, NODE);
607 allocate_data_block(fio.sbi, NULL, fio.blk_addr,
608 &fio.blk_addr, &sum, CURSEG_COLD_DATA);
621 f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
622
609 fio.rw = WRITE_SYNC;
623 fio.rw = WRITE_SYNC;
624 fio.new_blkaddr = newaddr;
610 f2fs_submit_page_mbio(&fio);
611
625 f2fs_submit_page_mbio(&fio);
626
612 dn.data_blkaddr = fio.blk_addr;
627 dn.data_blkaddr = fio.new_blkaddr;
613 set_data_blkaddr(&dn);
614 f2fs_update_extent_cache(&dn);
615 set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
616 if (page->index == 0)
617 set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
618put_page_out:
619 f2fs_put_page(fio.encrypted_page, 1);
628 set_data_blkaddr(&dn);
629 f2fs_update_extent_cache(&dn);
630 set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
631 if (page->index == 0)
632 set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
633put_page_out:
634 f2fs_put_page(fio.encrypted_page, 1);
635recover_block:
636 if (err)
637 __f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
638 true, true);
620put_out:
621 f2fs_put_dnode(&dn);
622out:
623 f2fs_put_page(page, 1);
624}
625
626static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
627{

--- 12 unchanged lines hidden (view full) ---

640 struct f2fs_io_info fio = {
641 .sbi = F2FS_I_SB(inode),
642 .type = DATA,
643 .rw = WRITE_SYNC,
644 .page = page,
645 .encrypted_page = NULL,
646 };
647 set_page_dirty(page);
639put_out:
640 f2fs_put_dnode(&dn);
641out:
642 f2fs_put_page(page, 1);
643}
644
645static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
646{

--- 12 unchanged lines hidden (view full) ---

659 struct f2fs_io_info fio = {
660 .sbi = F2FS_I_SB(inode),
661 .type = DATA,
662 .rw = WRITE_SYNC,
663 .page = page,
664 .encrypted_page = NULL,
665 };
666 set_page_dirty(page);
648 f2fs_wait_on_page_writeback(page, DATA);
667 f2fs_wait_on_page_writeback(page, DATA, true);
649 if (clear_page_dirty_for_io(page))
650 inode_dec_dirty_pages(inode);
651 set_cold_data(page);
652 do_write_data_page(&fio);
653 clear_cold_data(page);
654 }
655out:
656 f2fs_put_page(page, 1);
657}
658
659/*
660 * This function tries to get parent node of victim data block, and identifies
661 * data block validity. If the block is valid, copy that with cold status and
662 * modify parent node.
663 * If the parent node is not valid or the data block address is different,
664 * the victim data block is ignored.
665 */
668 if (clear_page_dirty_for_io(page))
669 inode_dec_dirty_pages(inode);
670 set_cold_data(page);
671 do_write_data_page(&fio);
672 clear_cold_data(page);
673 }
674out:
675 f2fs_put_page(page, 1);
676}
677
678/*
679 * This function tries to get parent node of victim data block, and identifies
680 * data block validity. If the block is valid, copy that with cold status and
681 * modify parent node.
682 * If the parent node is not valid or the data block address is different,
683 * the victim data block is ignored.
684 */
666static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
685static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
667 struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
668{
669 struct super_block *sb = sbi->sb;
670 struct f2fs_summary *entry;
671 block_t start_addr;
672 int off;
673 int phase = 0;
674

--- 6 unchanged lines hidden (view full) ---

681 struct page *data_page;
682 struct inode *inode;
683 struct node_info dni; /* dnode info for the data */
684 unsigned int ofs_in_node, nofs;
685 block_t start_bidx;
686
687 /* stop BG_GC if there is not enough free sections. */
688 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
686 struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
687{
688 struct super_block *sb = sbi->sb;
689 struct f2fs_summary *entry;
690 block_t start_addr;
691 int off;
692 int phase = 0;
693

--- 6 unchanged lines hidden (view full) ---

700 struct page *data_page;
701 struct inode *inode;
702 struct node_info dni; /* dnode info for the data */
703 unsigned int ofs_in_node, nofs;
704 block_t start_bidx;
705
706 /* stop BG_GC if there is not enough free sections. */
707 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
689 return 0;
708 return;
690
691 if (check_valid_map(sbi, segno, off) == 0)
692 continue;
693
694 if (phase == 0) {
695 ra_node_page(sbi, le32_to_cpu(entry->nid));
696 continue;
697 }

--- 16 unchanged lines hidden (view full) ---

714
715 /* if encrypted inode, let's go phase 3 */
716 if (f2fs_encrypted_inode(inode) &&
717 S_ISREG(inode->i_mode)) {
718 add_gc_inode(gc_list, inode);
719 continue;
720 }
721
709
710 if (check_valid_map(sbi, segno, off) == 0)
711 continue;
712
713 if (phase == 0) {
714 ra_node_page(sbi, le32_to_cpu(entry->nid));
715 continue;
716 }

--- 16 unchanged lines hidden (view full) ---

733
734 /* if encrypted inode, let's go phase 3 */
735 if (f2fs_encrypted_inode(inode) &&
736 S_ISREG(inode->i_mode)) {
737 add_gc_inode(gc_list, inode);
738 continue;
739 }
740
722 start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
741 start_bidx = start_bidx_of_node(nofs, inode);
723 data_page = get_read_data_page(inode,
724 start_bidx + ofs_in_node, READA, true);
725 if (IS_ERR(data_page)) {
726 iput(inode);
727 continue;
728 }
729
730 f2fs_put_page(data_page, 0);
731 add_gc_inode(gc_list, inode);
732 continue;
733 }
734
735 /* phase 3 */
736 inode = find_gc_inode(gc_list, dni.ino);
737 if (inode) {
742 data_page = get_read_data_page(inode,
743 start_bidx + ofs_in_node, READA, true);
744 if (IS_ERR(data_page)) {
745 iput(inode);
746 continue;
747 }
748
749 f2fs_put_page(data_page, 0);
750 add_gc_inode(gc_list, inode);
751 continue;
752 }
753
754 /* phase 3 */
755 inode = find_gc_inode(gc_list, dni.ino);
756 if (inode) {
738 start_bidx = start_bidx_of_node(nofs, F2FS_I(inode))
757 start_bidx = start_bidx_of_node(nofs, inode)
739 + ofs_in_node;
740 if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
741 move_encrypted_block(inode, start_bidx);
742 else
743 move_data_page(inode, start_bidx, gc_type);
744 stat_inc_data_blk_count(sbi, 1, gc_type);
745 }
746 }
747
748 if (++phase < 4)
749 goto next_step;
758 + ofs_in_node;
759 if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
760 move_encrypted_block(inode, start_bidx);
761 else
762 move_data_page(inode, start_bidx, gc_type);
763 stat_inc_data_blk_count(sbi, 1, gc_type);
764 }
765 }
766
767 if (++phase < 4)
768 goto next_step;
750
751 if (gc_type == FG_GC) {
752 f2fs_submit_merged_bio(sbi, DATA, WRITE);
753
754 /* return 1 only if FG_GC succefully reclaimed one */
755 if (get_valid_blocks(sbi, segno, 1) == 0)
756 return 1;
757 }
758 return 0;
759}
760
761static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
762 int gc_type)
763{
764 struct sit_info *sit_i = SIT_I(sbi);
765 int ret;
766
767 mutex_lock(&sit_i->sentry_lock);
768 ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
769 NO_CHECK_TYPE, LFS);
770 mutex_unlock(&sit_i->sentry_lock);
771 return ret;
772}
773
769}
770
771static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
772 int gc_type)
773{
774 struct sit_info *sit_i = SIT_I(sbi);
775 int ret;
776
777 mutex_lock(&sit_i->sentry_lock);
778 ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
779 NO_CHECK_TYPE, LFS);
780 mutex_unlock(&sit_i->sentry_lock);
781 return ret;
782}
783
774static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
784static int do_garbage_collect(struct f2fs_sb_info *sbi,
785 unsigned int start_segno,
775 struct gc_inode_list *gc_list, int gc_type)
776{
777 struct page *sum_page;
778 struct f2fs_summary_block *sum;
779 struct blk_plug plug;
786 struct gc_inode_list *gc_list, int gc_type)
787{
788 struct page *sum_page;
789 struct f2fs_summary_block *sum;
790 struct blk_plug plug;
780 int nfree = 0;
791 unsigned int segno = start_segno;
792 unsigned int end_segno = start_segno + sbi->segs_per_sec;
793 int seg_freed = 0;
794 unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
795 SUM_TYPE_DATA : SUM_TYPE_NODE;
781
796
782 /* read segment summary of victim */
783 sum_page = get_sum_page(sbi, segno);
797 /* readahead multi ssa blocks those have contiguous address */
798 if (sbi->segs_per_sec > 1)
799 ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
800 sbi->segs_per_sec, META_SSA, true);
784
801
802 /* reference all summary page */
803 while (segno < end_segno) {
804 sum_page = get_sum_page(sbi, segno++);
805 unlock_page(sum_page);
806 }
807
785 blk_start_plug(&plug);
786
808 blk_start_plug(&plug);
809
787 sum = page_address(sum_page);
810 for (segno = start_segno; segno < end_segno; segno++) {
811 /* find segment summary of victim */
812 sum_page = find_get_page(META_MAPPING(sbi),
813 GET_SUM_BLOCK(sbi, segno));
814 f2fs_bug_on(sbi, !PageUptodate(sum_page));
815 f2fs_put_page(sum_page, 0);
788
816
789 /*
790 * this is to avoid deadlock:
791 * - lock_page(sum_page) - f2fs_replace_block
792 * - check_valid_map() - mutex_lock(sentry_lock)
793 * - mutex_lock(sentry_lock) - change_curseg()
794 * - lock_page(sum_page)
795 */
796 unlock_page(sum_page);
817 sum = page_address(sum_page);
818 f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
797
819
798 switch (GET_SUM_TYPE((&sum->footer))) {
799 case SUM_TYPE_NODE:
800 nfree = gc_node_segment(sbi, sum->entries, segno, gc_type);
801 break;
802 case SUM_TYPE_DATA:
803 nfree = gc_data_segment(sbi, sum->entries, gc_list,
804 segno, gc_type);
805 break;
820 /*
821 * this is to avoid deadlock:
822 * - lock_page(sum_page) - f2fs_replace_block
823 * - check_valid_map() - mutex_lock(sentry_lock)
824 * - mutex_lock(sentry_lock) - change_curseg()
825 * - lock_page(sum_page)
826 */
827
828 if (type == SUM_TYPE_NODE)
829 gc_node_segment(sbi, sum->entries, segno, gc_type);
830 else
831 gc_data_segment(sbi, sum->entries, gc_list, segno,
832 gc_type);
833
834 stat_inc_seg_count(sbi, type, gc_type);
835
836 f2fs_put_page(sum_page, 0);
806 }
837 }
838
839 if (gc_type == FG_GC) {
840 if (type == SUM_TYPE_NODE) {
841 struct writeback_control wbc = {
842 .sync_mode = WB_SYNC_ALL,
843 .nr_to_write = LONG_MAX,
844 .for_reclaim = 0,
845 };
846 sync_node_pages(sbi, 0, &wbc);
847 } else {
848 f2fs_submit_merged_bio(sbi, DATA, WRITE);
849 }
850 }
851
807 blk_finish_plug(&plug);
808
852 blk_finish_plug(&plug);
853
809 stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
854 if (gc_type == FG_GC) {
855 while (start_segno < end_segno)
856 if (get_valid_blocks(sbi, start_segno++, 1) == 0)
857 seg_freed++;
858 }
859
810 stat_inc_call_count(sbi->stat_info);
811
860 stat_inc_call_count(sbi->stat_info);
861
812 f2fs_put_page(sum_page, 0);
813 return nfree;
862 return seg_freed;
814}
815
816int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
817{
863}
864
865int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
866{
818 unsigned int segno, i;
867 unsigned int segno;
819 int gc_type = sync ? FG_GC : BG_GC;
868 int gc_type = sync ? FG_GC : BG_GC;
820 int sec_freed = 0;
869 int sec_freed = 0, seg_freed;
821 int ret = -EINVAL;
822 struct cp_control cpc;
823 struct gc_inode_list gc_list = {
824 .ilist = LIST_HEAD_INIT(gc_list.ilist),
825 .iroot = RADIX_TREE_INIT(GFP_NOFS),
826 };
827
828 cpc.reason = __get_cp_reason(sbi);

--- 4 unchanged lines hidden (view full) ---

833 goto stop;
834 if (unlikely(f2fs_cp_error(sbi))) {
835 ret = -EIO;
836 goto stop;
837 }
838
839 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
840 gc_type = FG_GC;
870 int ret = -EINVAL;
871 struct cp_control cpc;
872 struct gc_inode_list gc_list = {
873 .ilist = LIST_HEAD_INIT(gc_list.ilist),
874 .iroot = RADIX_TREE_INIT(GFP_NOFS),
875 };
876
877 cpc.reason = __get_cp_reason(sbi);

--- 4 unchanged lines hidden (view full) ---

882 goto stop;
883 if (unlikely(f2fs_cp_error(sbi))) {
884 ret = -EIO;
885 goto stop;
886 }
887
888 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
889 gc_type = FG_GC;
890 /*
891 * If there is no victim and no prefree segment but still not
892 * enough free sections, we should flush dent/node blocks and do
893 * garbage collections.
894 */
841 if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
842 write_checkpoint(sbi, &cpc);
895 if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
896 write_checkpoint(sbi, &cpc);
897 else if (has_not_enough_free_secs(sbi, 0))
898 write_checkpoint(sbi, &cpc);
843 }
844
845 if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
846 goto stop;
847 ret = 0;
848
899 }
900
901 if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
902 goto stop;
903 ret = 0;
904
849 /* readahead multi ssa blocks those have contiguous address */
850 if (sbi->segs_per_sec > 1)
851 ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
852 META_SSA, true);
905 seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
853
906
854 for (i = 0; i < sbi->segs_per_sec; i++) {
855 /*
856 * for FG_GC case, halt gcing left segments once failed one
857 * of segments in selected section to avoid long latency.
858 */
859 if (!do_garbage_collect(sbi, segno + i, &gc_list, gc_type) &&
860 gc_type == FG_GC)
861 break;
862 }
863
864 if (i == sbi->segs_per_sec && gc_type == FG_GC)
907 if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
865 sec_freed++;
866
867 if (gc_type == FG_GC)
868 sbi->cur_victim_sec = NULL_SEGNO;
869
870 if (!sync) {
871 if (has_not_enough_free_secs(sbi, sec_freed))
872 goto gc_more;

--- 18 unchanged lines hidden ---
908 sec_freed++;
909
910 if (gc_type == FG_GC)
911 sbi->cur_victim_sec = NULL_SEGNO;
912
913 if (!sync) {
914 if (has_not_enough_free_secs(sbi, sec_freed))
915 goto gc_more;

--- 18 unchanged lines hidden ---