--- fs/f2fs/segment.c (6ae1be13e85f4c42c8ca371fda50ae39eebbfd96)
+++ fs/f2fs/segment.c (3c62be17d4f562f43fe1d03b48194399caa35aa5)

 /*
  * fs/f2fs/segment.c
  *
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as

--- 389 unchanged lines hidden (view full) ---

 			sync_dirty_inodes(sbi, FILE_INODE);
 			blk_finish_plug(&plug);
 		}
 		f2fs_sync_fs(sbi->sb, true);
 		stat_inc_bg_cp_count(sbi->stat_info);
 	}
 }

+static int __submit_flush_wait(struct block_device *bdev)
+{
+	struct bio *bio = f2fs_bio_alloc(0);
+	int ret;
+
+	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+	bio->bi_bdev = bdev;
+	ret = submit_bio_wait(bio);
+	bio_put(bio);
+	return ret;
+}
+
+static int submit_flush_wait(struct f2fs_sb_info *sbi)
+{
+	int ret = __submit_flush_wait(sbi->sb->s_bdev);
+	int i;
+
+	if (sbi->s_ndevs && !ret) {
+		for (i = 1; i < sbi->s_ndevs; i++) {
+			ret = __submit_flush_wait(FDEV(i).bdev);
+			if (ret)
+				break;
+		}
+	}
+	return ret;
+}
+
 static int issue_flush_thread(void *data)
 {
 	struct f2fs_sb_info *sbi = data;
 	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
 	wait_queue_head_t *q = &fcc->flush_wait_queue;
 repeat:
 	if (kthread_should_stop())
 		return 0;

 	if (!llist_empty(&fcc->issue_list)) {
-		struct bio *bio;
 		struct flush_cmd *cmd, *next;
 		int ret;

-		bio = f2fs_bio_alloc(0);
-
 		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
 		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

-		bio->bi_bdev = sbi->sb->s_bdev;
-		bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
-		ret = submit_bio_wait(bio);
-
+		ret = submit_flush_wait(sbi);
 		llist_for_each_entry_safe(cmd, next,
 					  fcc->dispatch_list, llnode) {
 			cmd->ret = ret;
 			complete(&cmd->wait);
 		}
-		bio_put(bio);
 		fcc->dispatch_list = NULL;
 	}

 	wait_event_interruptible(*q,
 		kthread_should_stop() || !llist_empty(&fcc->issue_list));
 	goto repeat;
 }

--- 4 unchanged lines hidden (view full) ---

 	trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER),
 				test_opt(sbi, FLUSH_MERGE));

 	if (test_opt(sbi, NOBARRIER))
 		return 0;

 	if (!test_opt(sbi, FLUSH_MERGE) || !atomic_read(&fcc->submit_flush)) {
-		struct bio *bio = f2fs_bio_alloc(0);
 		int ret;

 		atomic_inc(&fcc->submit_flush);
-		bio->bi_bdev = sbi->sb->s_bdev;
-		bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
-		ret = submit_bio_wait(bio);
+		ret = submit_flush_wait(sbi);
 		atomic_dec(&fcc->submit_flush);
-		bio_put(bio);
 		return ret;
 	}

 	init_completion(&cmd.wait);

 	atomic_inc(&fcc->submit_flush);
 	llist_add(&cmd.llnode, &fcc->issue_list);

--- 160 unchanged lines hidden (view full) ---

 	struct bio_entry *be = (struct bio_entry *)bio->bi_private;

 	be->error = bio->bi_error;
 	complete(&be->event);
 }

 /* this function is copied from blkdev_issue_discard from block/blk-lib.c */
 static int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi,
-				block_t blkstart, block_t blklen)
+		struct block_device *bdev, block_t blkstart, block_t blklen)
 {
-	struct block_device *bdev = sbi->sb->s_bdev;
 	struct bio *bio = NULL;
 	int err;

 	trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);

+	if (sbi->s_ndevs) {
+		int devi = f2fs_target_device_index(sbi, blkstart);
+
+		blkstart -= FDEV(devi).start_blk;
+	}
 	err = __blkdev_issue_discard(bdev,
 				SECTOR_FROM_BLOCK(blkstart),
 				SECTOR_FROM_BLOCK(blklen),
 				GFP_NOFS, 0, &bio);
 	if (!err && bio) {
 		struct bio_entry *be = __add_bio_entry(sbi, bio);

 		bio->bi_private = be;
 		bio->bi_end_io = f2fs_submit_bio_wait_endio;
 		bio->bi_opf |= REQ_SYNC;
 		submit_bio(bio);
 	}

 	return err;
 }

 #ifdef CONFIG_BLK_DEV_ZONED
-static int f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
-				block_t blkstart, block_t blklen)
+static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
+		struct block_device *bdev, block_t blkstart, block_t blklen)
 {
-	sector_t sector = SECTOR_FROM_BLOCK(blkstart);
 	sector_t nr_sects = SECTOR_FROM_BLOCK(blklen);
-	struct block_device *bdev = sbi->sb->s_bdev;
+	sector_t sector;
+	int devi = 0;

-	if (nr_sects != bdev_zone_size(bdev)) {
+	if (sbi->s_ndevs) {
+		devi = f2fs_target_device_index(sbi, blkstart);
+		blkstart -= FDEV(devi).start_blk;
+	}
+	sector = SECTOR_FROM_BLOCK(blkstart);
+
+	if (sector % bdev_zone_size(bdev) || nr_sects != bdev_zone_size(bdev)) {
 		f2fs_msg(sbi->sb, KERN_INFO,
-			"Unaligned discard attempted (sector %llu + %llu)",
-			(unsigned long long)sector,
-			(unsigned long long)nr_sects);
+			"(%d) %s: Unaligned discard attempted (block %x + %x)",
+			devi, sbi->s_ndevs ? FDEV(devi).path: "",
+			blkstart, blklen);
 		return -EIO;
 	}

 	/*
 	 * We need to know the type of the zone: for conventional zones,
 	 * use regular discard if the drive supports it. For sequential
 	 * zones, reset the zone write pointer.
 	 */
-	switch (get_blkz_type(sbi, blkstart)) {
+	switch (get_blkz_type(sbi, bdev, blkstart)) {

 	case BLK_ZONE_TYPE_CONVENTIONAL:
 		if (!blk_queue_discard(bdev_get_queue(bdev)))
 			return 0;
-		return __f2fs_issue_discard_async(sbi, blkstart,
-							blklen);
-
+		return __f2fs_issue_discard_async(sbi, bdev, blkstart, blklen);
 	case BLK_ZONE_TYPE_SEQWRITE_REQ:
 	case BLK_ZONE_TYPE_SEQWRITE_PREF:
 		trace_f2fs_issue_reset_zone(sbi->sb, blkstart);
 		return blkdev_reset_zones(bdev, sector,
 					  nr_sects, GFP_NOFS);
 	default:
 		/* Unknown zone type: broken device ? */
 		return -EIO;
 	}
 }
 #endif

+static int __issue_discard_async(struct f2fs_sb_info *sbi,
+		struct block_device *bdev, block_t blkstart, block_t blklen)
+{
+#ifdef CONFIG_BLK_DEV_ZONED
+	if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
+				bdev_zoned_model(bdev) != BLK_ZONED_NONE)
+		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
+#endif
+	return __f2fs_issue_discard_async(sbi, bdev, blkstart, blklen);
+}
+
 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
 				block_t blkstart, block_t blklen)
 {
+	sector_t start = blkstart, len = 0;
+	struct block_device *bdev;
 	struct seg_entry *se;
 	unsigned int offset;
 	block_t i;
+	int err = 0;

-	for (i = blkstart; i < blkstart + blklen; i++) {
+	bdev = f2fs_target_device(sbi, blkstart, NULL);
+
+	for (i = blkstart; i < blkstart + blklen; i++, len++) {
+		if (i != start) {
+			struct block_device *bdev2 =
+				f2fs_target_device(sbi, i, NULL);
+
+			if (bdev2 != bdev) {
+				err = __issue_discard_async(sbi, bdev,
+						start, len);
+				if (err)
+					return err;
+				bdev = bdev2;
+				start = i;
+				len = 0;
+			}
+		}
+
 		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
 		offset = GET_BLKOFF_FROM_SEG0(sbi, i);

 		if (!f2fs_test_and_set_bit(offset, se->discard_map))
 			sbi->discard_blks--;
 	}

-#ifdef CONFIG_BLK_DEV_ZONED
-	if (f2fs_sb_mounted_blkzoned(sbi->sb))
-		return f2fs_issue_discard_zone(sbi, blkstart, blklen);
-#endif
-	return __f2fs_issue_discard_async(sbi, blkstart, blklen);
+	if (len)
+		err = __issue_discard_async(sbi, bdev, start, len);
+	return err;
 }

 static void __add_discard_entry(struct f2fs_sb_info *sbi,
 				struct cp_control *cpc, struct seg_entry *se,
 				unsigned int start, unsigned int end)
 {
 	struct list_head *head = &SM_I(sbi)->discard_list;
 	struct discard_entry *new, *last;

--- 2000 unchanged lines hidden (view full) ---
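
Note on the flush-path change above: where the old code flushed only sbi->sb->s_bdev, the new submit_flush_wait() fans the cache flush out to the primary device and then to every extra device in FDEV(), stopping at the first error. A minimal user-space sketch of that fan-out-and-wait pattern follows; struct dev, flush_one() and flush_all() are hypothetical stand-ins for the kernel's block devices, __submit_flush_wait() and submit_flush_wait(), not kernel code.

#include <stdio.h>

struct dev { const char *path; };

/* stand-in for __submit_flush_wait(): flush one device, 0 on success */
static int flush_one(const struct dev *d)
{
	printf("flush %s\n", d->path);
	return 0;
}

/* stand-in for submit_flush_wait(): flush the primary device first,
 * then each additional device, stopping at the first failure */
static int flush_all(const struct dev *devs, int ndevs)
{
	int ret = flush_one(&devs[0]);
	int i;

	for (i = 1; i < ndevs && !ret; i++)
		ret = flush_one(&devs[i]);
	return ret;
}

int main(void)
{
	const struct dev devs[] = { { "/dev/a" }, { "/dev/b" }, { "/dev/c" } };

	return flush_all(devs, 3);
}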