segment.c: c829a33253e018472335b02e6d8bb1bb4213a142 (old) -> 8b8dd65f72ccbf7111eb97c4c4f5b5df2a412a07 (new)
1 /*
2  * fs/f2fs/segment.c
3  *
4  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5  *             http://www.samsung.com/
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as

--- 236 unchanged lines hidden ---

245         mutex_lock(&fi->inmem_lock);
246         __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
247         mutex_unlock(&fi->inmem_lock);
248
249         clear_inode_flag(inode, FI_ATOMIC_FILE);
250         stat_dec_atomic_write(inode);
251 }
252
253 void drop_inmem_page(struct inode *inode, struct page *page)
254 {
255         struct f2fs_inode_info *fi = F2FS_I(inode);
256         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
257         struct list_head *head = &fi->inmem_pages;
258         struct inmem_pages *cur = NULL;
259
260         f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));
261
262         mutex_lock(&fi->inmem_lock);
263         list_for_each_entry(cur, head, list) {
264                 if (cur->page == page)
265                         break;
266         }
267
268         f2fs_bug_on(sbi, !cur || cur->page != page);
269         list_del(&cur->list);
270         mutex_unlock(&fi->inmem_lock);
271
272         dec_page_count(sbi, F2FS_INMEM_PAGES);
273         kmem_cache_free(inmem_entry_slab, cur);
274
275         ClearPageUptodate(page);
276         set_page_private(page, 0);
277         ClearPagePrivate(page);
278         f2fs_put_page(page, 0);
279
280         trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
281 }
282
283 static int __commit_inmem_pages(struct inode *inode,
284                 struct list_head *revoke_list)
285 {
286         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
287         struct f2fs_inode_info *fi = F2FS_I(inode);
288         struct inmem_pages *cur, *tmp;
289         struct f2fs_io_info fio = {
290                 .sbi = sbi,

--- 145 unchanged lines hidden ---

436                         sync_dirty_inodes(sbi, FILE_INODE);
437                         blk_finish_plug(&plug);
438                 }
439                 f2fs_sync_fs(sbi->sb, true);
440                 stat_inc_bg_cp_count(sbi->stat_info);
441         }
442 }
443
414 static int __submit_flush_wait(struct block_device *bdev)
415 {
416         struct bio *bio = f2fs_bio_alloc(0);
417         int ret;
418
419         bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
420         bio->bi_bdev = bdev;
421         ret = submit_bio_wait(bio);
422         bio_put(bio);
423         return ret;
424 }
425
426 static int submit_flush_wait(struct f2fs_sb_info *sbi)
427 {
428         int ret = __submit_flush_wait(sbi->sb->s_bdev);
429         int i;
430
431         if (sbi->s_ndevs && !ret) {
432                 for (i = 1; i < sbi->s_ndevs; i++) {
433                         trace_f2fs_issue_flush(FDEV(i).bdev,
434                                         test_opt(sbi, NOBARRIER),
435                                         test_opt(sbi, FLUSH_MERGE));
436                         ret = __submit_flush_wait(FDEV(i).bdev);
437                         if (ret)
438                                 break;
439                 }
440         }
441         return ret;
442 }
443
444 static int __submit_flush_wait(struct f2fs_sb_info *sbi,
445                 struct block_device *bdev)
446 {
447         struct bio *bio = f2fs_bio_alloc(0);
448         int ret;
449
450         bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
451         bio->bi_bdev = bdev;
452         ret = submit_bio_wait(bio);
453         bio_put(bio);
454
455         trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
456                                 test_opt(sbi, FLUSH_MERGE), ret);
457         return ret;
458 }
459
460 static int submit_flush_wait(struct f2fs_sb_info *sbi)
461 {
462         int ret = __submit_flush_wait(sbi, sbi->sb->s_bdev);
463         int i;
464
465         if (!sbi->s_ndevs || ret)
466                 return ret;
467
468         for (i = 1; i < sbi->s_ndevs; i++) {
469                 ret = __submit_flush_wait(sbi, FDEV(i).bdev);
470                 if (ret)
471                         break;
472         }
473         return ret;
474 }
475
444 static int issue_flush_thread(void *data)
445 {
446         struct f2fs_sb_info *sbi = data;
447         struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

--- 5 unchanged lines hidden ---

453         if (!llist_empty(&fcc->issue_list)) {
454                 struct flush_cmd *cmd, *next;
455                 int ret;
456
457                 fcc->dispatch_list = llist_del_all(&fcc->issue_list);
458                 fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
459
460                 ret = submit_flush_wait(sbi);
461                 llist_for_each_entry_safe(cmd, next,
462                                 fcc->dispatch_list, llnode) {
463                         cmd->ret = ret;
464                         complete(&cmd->wait);
465                 }
466                 fcc->dispatch_list = NULL;
467         }
468
469         wait_event_interruptible(*q,
470                 kthread_should_stop() || !llist_empty(&fcc->issue_list));
471         goto repeat;
472 }
473
476 static int issue_flush_thread(void *data)
477 {
478         struct f2fs_sb_info *sbi = data;
479         struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

--- 5 unchanged lines hidden ---

485         if (!llist_empty(&fcc->issue_list)) {
486                 struct flush_cmd *cmd, *next;
487                 int ret;
488
489                 fcc->dispatch_list = llist_del_all(&fcc->issue_list);
490                 fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
491
492                 ret = submit_flush_wait(sbi);
493                 atomic_inc(&fcc->issued_flush);
494
495                 llist_for_each_entry_safe(cmd, next,
496                                 fcc->dispatch_list, llnode) {
497                         cmd->ret = ret;
498                         complete(&cmd->wait);
499                 }
500                 fcc->dispatch_list = NULL;
501         }
502
503         wait_event_interruptible(*q,
504                 kthread_should_stop() || !llist_empty(&fcc->issue_list));
505         goto repeat;
506 }
507
474 int f2fs_issue_flush(struct f2fs_sb_info *sbi)
475 {
476         struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
477         struct flush_cmd cmd;
478
479         if (test_opt(sbi, NOBARRIER))
480                 return 0;
481
482         if (!test_opt(sbi, FLUSH_MERGE))
483                 return submit_flush_wait(sbi);
484
485         if (!atomic_read(&fcc->submit_flush)) {
486                 int ret;
487
488                 atomic_inc(&fcc->submit_flush);
489                 ret = submit_flush_wait(sbi);
490                 atomic_dec(&fcc->submit_flush);
491                 return ret;
492         }
493
494         init_completion(&cmd.wait);
495
496         atomic_inc(&fcc->submit_flush);
497         llist_add(&cmd.llnode, &fcc->issue_list);
498
499         if (!fcc->dispatch_list)
500                 wake_up(&fcc->flush_wait_queue);
501
502         if (fcc->f2fs_issue_flush) {
503                 wait_for_completion(&cmd.wait);
504                 atomic_dec(&fcc->submit_flush);
505         } else {
506                 llist_del_all(&fcc->issue_list);
507                 atomic_set(&fcc->submit_flush, 0);
508         }
509
510         return cmd.ret;
511 }
512
508 int f2fs_issue_flush(struct f2fs_sb_info *sbi)
509 {
510         struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
511         struct flush_cmd cmd;
512         int ret;
513
514         if (test_opt(sbi, NOBARRIER))
515                 return 0;
516
517         if (!test_opt(sbi, FLUSH_MERGE)) {
518                 ret = submit_flush_wait(sbi);
519                 atomic_inc(&fcc->issued_flush);
520                 return ret;
521         }
522
523         if (!atomic_read(&fcc->issing_flush)) {
524                 atomic_inc(&fcc->issing_flush);
525                 ret = submit_flush_wait(sbi);
526                 atomic_dec(&fcc->issing_flush);
527
528                 atomic_inc(&fcc->issued_flush);
529                 return ret;
530         }
531
532         init_completion(&cmd.wait);
533
534         atomic_inc(&fcc->issing_flush);
535         llist_add(&cmd.llnode, &fcc->issue_list);
536
537         if (!fcc->dispatch_list)
538                 wake_up(&fcc->flush_wait_queue);
539
540         if (fcc->f2fs_issue_flush) {
541                 wait_for_completion(&cmd.wait);
542                 atomic_dec(&fcc->issing_flush);
543         } else {
544                 llist_del_all(&fcc->issue_list);
545                 atomic_set(&fcc->issing_flush, 0);
546         }
547
548         return cmd.ret;
549 }
550
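The FLUSH_MERGE path above is a batching pattern: each caller parks a completion-carrying flush_cmd on a lock-free llist, and a single worker drains the list, issues one PREFLUSH for the whole batch, and copies the shared return code back to every waiter. Below is a minimal userspace C sketch of the same pattern, with a mutex/condvar standing in for llist, kthread and completion; every name in it is illustrative, not f2fs or kernel API. Note how the command lives on the caller's stack, exactly as cmd does in f2fs_issue_flush(), which is safe because the caller does not return until done is set.

#include <pthread.h>
#include <stdio.h>

/* one queued flush request; mirrors struct flush_cmd (illustrative) */
struct cmd {
        struct cmd *next;
        int ret;
        int done;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static struct cmd *issue_list;          /* stands in for fcc->issue_list */

static int do_flush(void)               /* stands in for submit_flush_wait() */
{
        return 0;
}

/* worker: drain the whole list, flush once, wake every waiter */
static void *flush_worker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        for (;;) {
                while (!issue_list)
                        pthread_cond_wait(&cond, &lock);
                struct cmd *batch = issue_list;
                issue_list = NULL;
                pthread_mutex_unlock(&lock);

                int ret = do_flush();   /* one flush serves the whole batch */

                pthread_mutex_lock(&lock);
                for (struct cmd *c = batch; c; c = c->next) {
                        c->ret = ret;
                        c->done = 1;
                }
                pthread_cond_broadcast(&cond);
        }
        return NULL;
}

/* caller side: enqueue on stack and wait, like f2fs_issue_flush() */
static int issue_flush(void)
{
        struct cmd c = { .next = NULL, .ret = 0, .done = 0 };

        pthread_mutex_lock(&lock);
        c.next = issue_list;
        issue_list = &c;
        pthread_cond_broadcast(&cond);
        while (!c.done)
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        return c.ret;
}

static void *flusher(void *arg)
{
        (void)arg;
        printf("flush ret=%d\n", issue_flush());
        return NULL;
}

int main(void)
{
        pthread_t w, t[4];

        pthread_create(&w, NULL, flush_worker, NULL);
        for (int i = 0; i < 4; i++)
                pthread_create(&t[i], NULL, flusher, NULL);
        for (int i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        return 0;       /* worker loops forever; process exit reaps it */
}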
513 int create_flush_cmd_control(struct f2fs_sb_info *sbi)
514 {
515         dev_t dev = sbi->sb->s_bdev->bd_dev;
516         struct flush_cmd_control *fcc;
517         int err = 0;
518
519         if (SM_I(sbi)->fcc_info) {
520                 fcc = SM_I(sbi)->fcc_info;
521                 goto init_thread;
522         }
523
524         fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
525         if (!fcc)
526                 return -ENOMEM;
527         atomic_set(&fcc->submit_flush, 0);
528         init_waitqueue_head(&fcc->flush_wait_queue);
529         init_llist_head(&fcc->issue_list);
530         SM_I(sbi)->fcc_info = fcc;
531 init_thread:
532         fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
533                         "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
534         if (IS_ERR(fcc->f2fs_issue_flush)) {
535                 err = PTR_ERR(fcc->f2fs_issue_flush);
551 int create_flush_cmd_control(struct f2fs_sb_info *sbi)
552 {
553         dev_t dev = sbi->sb->s_bdev->bd_dev;
554         struct flush_cmd_control *fcc;
555         int err = 0;
556
557         if (SM_I(sbi)->fcc_info) {
558                 fcc = SM_I(sbi)->fcc_info;
559                 goto init_thread;
560         }
561
562         fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
563         if (!fcc)
564                 return -ENOMEM;
565         atomic_set(&fcc->issued_flush, 0);
566         atomic_set(&fcc->issing_flush, 0);
567         init_waitqueue_head(&fcc->flush_wait_queue);
568         init_llist_head(&fcc->issue_list);
569         SM_I(sbi)->fcc_info = fcc;
570 init_thread:
571         fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
572                         "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
573         if (IS_ERR(fcc->f2fs_issue_flush)) {
574                 err = PTR_ERR(fcc->f2fs_issue_flush);

--- 93 unchanged lines hidden (view full) ---

629 /* Recovery routine with SSR needs this */
630 __remove_dirty_segment(sbi, segno, DIRTY);
631 }
632
633 mutex_unlock(&dirty_i->seglist_lock);
634}
635
636static void __add_discard_cmd(struct f2fs_sb_info *sbi,
567 init_waitqueue_head(&fcc->flush_wait_queue);
568 init_llist_head(&fcc->issue_list);
569 SM_I(sbi)->fcc_info = fcc;
570init_thread:
571 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
572 "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
573 if (IS_ERR(fcc->f2fs_issue_flush)) {
574 err = PTR_ERR(fcc->f2fs_issue_flush);

--- 93 unchanged lines hidden (view full) ---

668 /* Recovery routine with SSR needs this */
669 __remove_dirty_segment(sbi, segno, DIRTY);
670 }
671
672 mutex_unlock(&dirty_i->seglist_lock);
673}
674
675static void __add_discard_cmd(struct f2fs_sb_info *sbi,
637 struct bio *bio, block_t lstart, block_t len)
676 struct block_device *bdev, block_t lstart,
677 block_t start, block_t len)
638{
639 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
640 struct list_head *cmd_list = &(dcc->discard_cmd_list);
641 struct discard_cmd *dc;
642
643 dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
644 INIT_LIST_HEAD(&dc->list);
678{
679 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
680 struct list_head *cmd_list = &(dcc->discard_cmd_list);
681 struct discard_cmd *dc;
682
683 dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
684 INIT_LIST_HEAD(&dc->list);
645 dc->bio = bio;
646 bio->bi_private = dc;
685 dc->bdev = bdev;
647 dc->lstart = lstart;
686 dc->lstart = lstart;
687 dc->start = start;
648 dc->len = len;
649 dc->state = D_PREP;
688 dc->len = len;
689 dc->state = D_PREP;
690 dc->error = 0;
650 init_completion(&dc->wait);
651
652 mutex_lock(&dcc->cmd_lock);
653 list_add_tail(&dc->list, cmd_list);
654 mutex_unlock(&dcc->cmd_lock);
655}
656
657static void __remove_discard_cmd(struct f2fs_sb_info *sbi, struct discard_cmd *dc)
658{
691 init_completion(&dc->wait);
692
693 mutex_lock(&dcc->cmd_lock);
694 list_add_tail(&dc->list, cmd_list);
695 mutex_unlock(&dcc->cmd_lock);
696}
697
698static void __remove_discard_cmd(struct f2fs_sb_info *sbi, struct discard_cmd *dc)
699{
659 int err = dc->bio->bi_error;
660
661 if (dc->state == D_DONE)
700 if (dc->state == D_DONE)
662 atomic_dec(&(SM_I(sbi)->dcc_info->submit_discard));
701 atomic_dec(&(SM_I(sbi)->dcc_info->issing_discard));
663
702
664 if (err == -EOPNOTSUPP)
665 err = 0;
703 if (dc->error == -EOPNOTSUPP)
704 dc->error = 0;
666
705
667 if (err)
706 if (dc->error)
668 f2fs_msg(sbi->sb, KERN_INFO,
707 f2fs_msg(sbi->sb, KERN_INFO,
669 "Issue discard failed, ret: %d", err);
670 bio_put(dc->bio);
708 "Issue discard failed, ret: %d", dc->error);
671 list_del(&dc->list);
672 kmem_cache_free(discard_cmd_slab, dc);
673}
674
709 list_del(&dc->list);
710 kmem_cache_free(discard_cmd_slab, dc);
711}
712
713 static void f2fs_submit_discard_endio(struct bio *bio)
714 {
715         struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
716
717         complete(&dc->wait);
718         dc->error = bio->bi_error;
719         dc->state = D_DONE;
720         bio_put(bio);
721 }
722
723 /* this function is copied from blkdev_issue_discard from block/blk-lib.c */
724 static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
725                 struct discard_cmd *dc)
726 {
727         struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
728         struct bio *bio = NULL;
729
730         if (dc->state != D_PREP)
731                 return;
732
733         dc->error = __blkdev_issue_discard(dc->bdev,
734                                 SECTOR_FROM_BLOCK(dc->start),
735                                 SECTOR_FROM_BLOCK(dc->len),
736                                 GFP_NOFS, 0, &bio);
737         if (!dc->error) {
738                 /* should keep before submission to avoid D_DONE right away */
739                 dc->state = D_SUBMIT;
740                 atomic_inc(&dcc->issued_discard);
741                 atomic_inc(&dcc->issing_discard);
742                 if (bio) {
743                         bio->bi_private = dc;
744                         bio->bi_end_io = f2fs_submit_discard_endio;
745                         bio->bi_opf |= REQ_SYNC;
746                         submit_bio(bio);
747                 }
748         } else {
749                 __remove_discard_cmd(sbi, dc);
750         }
751 }
752
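The comment at new line 738 is load-bearing: a bio's end_io can run before submit_bio() returns, so dc->state must already be D_SUBMIT at submission time, otherwise the assignment would overwrite the D_DONE stored by an immediate completion. A tiny C sketch of the ordering, with a deliberately synchronous submit standing in for the block layer (all names are stand-ins):

#include <stdio.h>

enum state { D_PREP, D_SUBMIT, D_DONE };

static enum state st = D_PREP;

/* stand-in for f2fs_submit_discard_endio(): may run synchronously */
static void endio(void)
{
        st = D_DONE;
}

/* stand-in for submit_bio(): worst case completes before returning */
static void submit(void (*end_io)(void))
{
        end_io();
}

int main(void)
{
        /*
         * Mirror of __submit_discard_cmd(): move to D_SUBMIT *before*
         * submitting. Assigning afterwards would clobber the D_DONE
         * that a synchronous completion has just stored.
         */
        st = D_SUBMIT;
        submit(endio);
        printf("final state: %d (2 == D_DONE)\n", st);
        return 0;
}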
753 static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
754                 struct block_device *bdev, block_t blkstart, block_t blklen)
755 {
756         block_t lblkstart = blkstart;
757
758         trace_f2fs_issue_discard(bdev, blkstart, blklen);
759
760         if (sbi->s_ndevs) {
761                 int devi = f2fs_target_device_index(sbi, blkstart);
762
763                 blkstart -= FDEV(devi).start_blk;
764         }
765         __add_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen);
766         wake_up(&SM_I(sbi)->dcc_info->discard_wait_queue);
767         return 0;
768 }
769
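__queue_discard_cmd() keeps two addresses per command: the logical lblkstart, which list lookups such as f2fs_wait_discard_bio() compare against, and the device-relative blkstart obtained by subtracting the owning device's start_blk, which is what eventually reaches the bio layer. A self-contained sketch of that translation; the device table and numbers are invented for illustration:

#include <stdio.h>

typedef unsigned long long block_t;

/* stand-in for the FDEV() table: each device owns a slice of the
 * filesystem's logical block address space */
struct dev_slice {
        const char *name;
        block_t start_blk, end_blk;
};

static struct dev_slice devs[] = {
        { "sda", 0,      99999  },
        { "sdb", 100000, 199999 },
        { "sdc", 200000, 299999 },
};

/* mirrors f2fs_target_device_index(): which device holds blkaddr? */
static int target_device_index(block_t blkaddr)
{
        int n = sizeof(devs) / sizeof(devs[0]);

        for (int i = 0; i < n; i++)
                if (blkaddr >= devs[i].start_blk && blkaddr <= devs[i].end_blk)
                        return i;
        return 0;
}

int main(void)
{
        block_t lblkstart = 123456;     /* logical: kept in dc->lstart */
        int devi = target_device_index(lblkstart);
        block_t blkstart = lblkstart - devs[devi].start_blk;

        /* device-relative address: what dc->start / the bio layer sees */
        printf("logical %llu -> %s + %llu\n",
                lblkstart, devs[devi].name, blkstart);
        return 0;
}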
770 static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
771                 struct discard_cmd *dc, block_t blkaddr)
772 {
773         block_t end_block = START_BLOCK(sbi, GET_SEGNO(sbi, blkaddr) + 1);
774
775         if (dc->state == D_DONE || dc->lstart + dc->len <= end_block) {
776                 __remove_discard_cmd(sbi, dc);
777                 return;
778         }
779
780         if (blkaddr - dc->lstart < dc->lstart + dc->len - end_block) {
781                 dc->start += (end_block - dc->lstart);
782                 dc->len -= (end_block - dc->lstart);
783                 dc->lstart = end_block;
784         } else {
785                 dc->len = blkaddr - dc->lstart;
786         }
787 }
788
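__punch_discard_cmd() carves blkaddr's whole segment out of a pending discard: a command that ends inside that segment is dropped outright, otherwise the larger remnant survives, either the tail starting at end_block (lstart, start and len all shift) or the head ending at blkaddr (only len shrinks). The same interval arithmetic as a standalone program, ignoring the D_DONE case and passing end_block in directly; the values are made up:

#include <stdio.h>

typedef unsigned long long block_t;

struct dcmd {
        block_t lstart; /* logical start */
        block_t start;  /* device-relative start */
        block_t len;
        int dead;       /* stands in for __remove_discard_cmd() */
};

/* mirror of the trimming logic in __punch_discard_cmd() */
static void punch(struct dcmd *dc, block_t blkaddr, block_t end_block)
{
        if (dc->lstart + dc->len <= end_block) {
                dc->dead = 1;   /* fully covered: drop the command */
                return;
        }
        if (blkaddr - dc->lstart < dc->lstart + dc->len - end_block) {
                /* tail is longer: keep [end_block, lstart + len) */
                dc->start += end_block - dc->lstart;
                dc->len -= end_block - dc->lstart;
                dc->lstart = end_block;
        } else {
                /* head is longer: keep [lstart, blkaddr) */
                dc->len = blkaddr - dc->lstart;
        }
}

int main(void)
{
        struct dcmd dc = { .lstart = 100, .start = 1100, .len = 900 };

        punch(&dc, 120, 612);   /* punch at 120; segment ends at 612 */
        printf("lstart=%llu start=%llu len=%llu dead=%d\n",
                dc.lstart, dc.start, dc.len, dc.dead);
        /* tail [612, 1000) is longer, so: lstart=612 start=1612 len=388 */
        return 0;
}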
675 /* This should be covered by global mutex, &sit_i->sentry_lock */
676 void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
677 {
678         struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
679         struct list_head *wait_list = &(dcc->discard_cmd_list);
680         struct discard_cmd *dc, *tmp;
681         struct blk_plug plug;
682
683         mutex_lock(&dcc->cmd_lock);
684
685         blk_start_plug(&plug);
686
687         list_for_each_entry_safe(dc, tmp, wait_list, list) {
688
689                 if (blkaddr == NULL_ADDR) {
690                         if (dc->state == D_PREP) {
691                                 dc->state = D_SUBMIT;
692                                 submit_bio(dc->bio);
693                                 atomic_inc(&dcc->submit_discard);
694                         }
695                         continue;
696                 }
697
698                 if (dc->lstart <= blkaddr && blkaddr < dc->lstart + dc->len) {
699                         if (dc->state == D_SUBMIT)
700                                 wait_for_completion_io(&dc->wait);
701                         else
702                                 __remove_discard_cmd(sbi, dc);
703                 }
704         }
705         blk_finish_plug(&plug);
706
707         /* this comes from f2fs_put_super */
708         if (blkaddr == NULL_ADDR) {
709                 list_for_each_entry_safe(dc, tmp, wait_list, list) {
710                         wait_for_completion_io(&dc->wait);
711                         __remove_discard_cmd(sbi, dc);
712                 }
713         }
714         mutex_unlock(&dcc->cmd_lock);
715 }
716
717 static void f2fs_submit_discard_endio(struct bio *bio)
718 {
719         struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
720
721         complete(&dc->wait);
722         dc->state = D_DONE;
723 }
724
789 /* This should be covered by global mutex, &sit_i->sentry_lock */
790 void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
791 {
792         struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
793         struct list_head *wait_list = &(dcc->discard_cmd_list);
794         struct discard_cmd *dc, *tmp;
795         struct blk_plug plug;
796
797         mutex_lock(&dcc->cmd_lock);
798
799         blk_start_plug(&plug);
800
801         list_for_each_entry_safe(dc, tmp, wait_list, list) {
802
803                 if (blkaddr == NULL_ADDR) {
804                         __submit_discard_cmd(sbi, dc);
805                         continue;
806                 }
807
808                 if (dc->lstart <= blkaddr && blkaddr < dc->lstart + dc->len) {
809                         if (dc->state == D_SUBMIT)
810                                 wait_for_completion_io(&dc->wait);
811                         __punch_discard_cmd(sbi, dc, blkaddr);
812                 }
813         }
814         blk_finish_plug(&plug);
815
816         /* this comes from f2fs_put_super */
817         if (blkaddr == NULL_ADDR) {
818                 list_for_each_entry_safe(dc, tmp, wait_list, list) {
819                         wait_for_completion_io(&dc->wait);
820                         __remove_discard_cmd(sbi, dc);
821                 }
822         }
823         mutex_unlock(&dcc->cmd_lock);
824 }
825
725 static int issue_discard_thread(void *data)
726 {
727         struct f2fs_sb_info *sbi = data;
728         struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
729         wait_queue_head_t *q = &dcc->discard_wait_queue;
730         struct list_head *cmd_list = &dcc->discard_cmd_list;
731         struct discard_cmd *dc, *tmp;
732         struct blk_plug plug;
733         int iter = 0;
734 repeat:
735         if (kthread_should_stop())
736                 return 0;
737
738         blk_start_plug(&plug);
739
740         mutex_lock(&dcc->cmd_lock);
741         list_for_each_entry_safe(dc, tmp, cmd_list, list) {
742                 if (dc->state == D_PREP) {
743                         dc->state = D_SUBMIT;
744                         submit_bio(dc->bio);
745                         atomic_inc(&dcc->submit_discard);
746                         if (iter++ > DISCARD_ISSUE_RATE)
747                                 break;
748                 } else if (dc->state == D_DONE) {
749                         __remove_discard_cmd(sbi, dc);
750                 }
751         }
752         mutex_unlock(&dcc->cmd_lock);
753
754         blk_finish_plug(&plug);
755
756         iter = 0;
757         congestion_wait(BLK_RW_SYNC, HZ/50);
758
759         wait_event_interruptible(*q,
760                 kthread_should_stop() || !list_empty(&dcc->discard_cmd_list));
761         goto repeat;
762 }
763
826 static int issue_discard_thread(void *data)
827 {
828         struct f2fs_sb_info *sbi = data;
829         struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
830         wait_queue_head_t *q = &dcc->discard_wait_queue;
831         struct list_head *cmd_list = &dcc->discard_cmd_list;
832         struct discard_cmd *dc, *tmp;
833         struct blk_plug plug;
834         int iter = 0;
835 repeat:
836         if (kthread_should_stop())
837                 return 0;
838
839         blk_start_plug(&plug);
840
841         mutex_lock(&dcc->cmd_lock);
842         list_for_each_entry_safe(dc, tmp, cmd_list, list) {
843
844                 if (is_idle(sbi))
845                         __submit_discard_cmd(sbi, dc);
846
847                 if (dc->state == D_PREP && iter++ > DISCARD_ISSUE_RATE)
848                         break;
849                 if (dc->state == D_DONE)
850                         __remove_discard_cmd(sbi, dc);
851         }
852         mutex_unlock(&dcc->cmd_lock);
853
854         blk_finish_plug(&plug);
855
856         iter = 0;
857         congestion_wait(BLK_RW_SYNC, HZ/50);
858
859         wait_event_interruptible(*q,
860                 kthread_should_stop() || !list_empty(&dcc->discard_cmd_list));
861         goto repeat;
862 }
863
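After the rework the discard thread shares __submit_discard_cmd() with the wait path and separates submission from reaping: prepared commands are issued only while the device looks idle, a pass gives up after meeting more than DISCARD_ISSUE_RATE still-unissued commands, and completed commands are reaped in the same walk. The loop shape reduced to a standalone C sketch over an array; the idle test and the rate value are stand-ins:

#include <stdio.h>

#define DISCARD_ISSUE_RATE 8

enum state { D_PREP, D_SUBMIT, D_DONE };

struct cmd { enum state st; };

static int device_is_idle = 1;          /* stand-in for is_idle(sbi) */

static void submit_one(struct cmd *c)   /* stand-in for __submit_discard_cmd() */
{
        if (c->st == D_PREP)
                c->st = D_SUBMIT;
}

/*
 * One walk over the pending list, shaped like the loop in
 * issue_discard_thread(): issue while the device is idle, stop after
 * meeting more than DISCARD_ISSUE_RATE still-unissued commands, and
 * count (in the kernel: free) the completed ones.
 */
static int one_pass(struct cmd *cmds, int n)
{
        int iter = 0, reaped = 0;

        for (int i = 0; i < n; i++) {
                if (device_is_idle)
                        submit_one(&cmds[i]);
                if (cmds[i].st == D_PREP && iter++ > DISCARD_ISSUE_RATE)
                        break;
                if (cmds[i].st == D_DONE)
                        reaped++;
        }
        return reaped;
}

int main(void)
{
        struct cmd cmds[4] = { {D_PREP}, {D_DONE}, {D_PREP}, {D_DONE} };

        printf("reaped %d\n", one_pass(cmds, 4));   /* prints: reaped 2 */
        return 0;
}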
764
765 /* this function is copied from blkdev_issue_discard from block/blk-lib.c */
766 static int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi,
767                 struct block_device *bdev, block_t blkstart, block_t blklen)
768 {
769         struct bio *bio = NULL;
770         block_t lblkstart = blkstart;
771         int err;
772
773         trace_f2fs_issue_discard(bdev, blkstart, blklen);
774
775         if (sbi->s_ndevs) {
776                 int devi = f2fs_target_device_index(sbi, blkstart);
777
778                 blkstart -= FDEV(devi).start_blk;
779         }
780         err = __blkdev_issue_discard(bdev,
781                         SECTOR_FROM_BLOCK(blkstart),
782                         SECTOR_FROM_BLOCK(blklen),
783                         GFP_NOFS, 0, &bio);
784         if (!err && bio) {
785                 bio->bi_end_io = f2fs_submit_discard_endio;
786                 bio->bi_opf |= REQ_SYNC;
787
788                 __add_discard_cmd(sbi, bio, lblkstart, blklen);
789                 wake_up(&SM_I(sbi)->dcc_info->discard_wait_queue);
790         }
791         return err;
792 }
793
794 #ifdef CONFIG_BLK_DEV_ZONED
795 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
796                 struct block_device *bdev, block_t blkstart, block_t blklen)
797 {
798         sector_t sector, nr_sects;
799         int devi = 0;
800
801         if (sbi->s_ndevs) {
802                 devi = f2fs_target_device_index(sbi, blkstart);
803                 blkstart -= FDEV(devi).start_blk;
804         }
805
806         /*
807          * We need to know the type of the zone: for conventional zones,
808          * use regular discard if the drive supports it. For sequential
809          * zones, reset the zone write pointer.
810          */
811         switch (get_blkz_type(sbi, bdev, blkstart)) {
812
813         case BLK_ZONE_TYPE_CONVENTIONAL:
814                 if (!blk_queue_discard(bdev_get_queue(bdev)))
815                         return 0;
816                 return __f2fs_issue_discard_async(sbi, bdev, blkstart, blklen);
817         case BLK_ZONE_TYPE_SEQWRITE_REQ:
818         case BLK_ZONE_TYPE_SEQWRITE_PREF:
819                 sector = SECTOR_FROM_BLOCK(blkstart);
820                 nr_sects = SECTOR_FROM_BLOCK(blklen);
821
822                 if (sector & (bdev_zone_sectors(bdev) - 1) ||
823                                 nr_sects != bdev_zone_sectors(bdev)) {
824                         f2fs_msg(sbi->sb, KERN_INFO,

--- 15 unchanged lines hidden ---

840 static int __issue_discard_async(struct f2fs_sb_info *sbi,
841                 struct block_device *bdev, block_t blkstart, block_t blklen)
842 {
843 #ifdef CONFIG_BLK_DEV_ZONED
844         if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
845                         bdev_zoned_model(bdev) != BLK_ZONED_NONE)
846                 return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
847 #endif
848         return __f2fs_issue_discard_async(sbi, bdev, blkstart, blklen);
849 }
850
864 #ifdef CONFIG_BLK_DEV_ZONED
865 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
866                 struct block_device *bdev, block_t blkstart, block_t blklen)
867 {
868         sector_t sector, nr_sects;
869         block_t lblkstart = blkstart;
870         int devi = 0;
871
872         if (sbi->s_ndevs) {
873                 devi = f2fs_target_device_index(sbi, blkstart);
874                 blkstart -= FDEV(devi).start_blk;
875         }
876
877         /*
878          * We need to know the type of the zone: for conventional zones,
879          * use regular discard if the drive supports it. For sequential
880          * zones, reset the zone write pointer.
881          */
882         switch (get_blkz_type(sbi, bdev, blkstart)) {
883
884         case BLK_ZONE_TYPE_CONVENTIONAL:
885                 if (!blk_queue_discard(bdev_get_queue(bdev)))
886                         return 0;
887                 return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
888         case BLK_ZONE_TYPE_SEQWRITE_REQ:
889         case BLK_ZONE_TYPE_SEQWRITE_PREF:
890                 sector = SECTOR_FROM_BLOCK(blkstart);
891                 nr_sects = SECTOR_FROM_BLOCK(blklen);
892
893                 if (sector & (bdev_zone_sectors(bdev) - 1) ||
894                                 nr_sects != bdev_zone_sectors(bdev)) {
895                         f2fs_msg(sbi->sb, KERN_INFO,

--- 15 unchanged lines hidden ---

911 static int __issue_discard_async(struct f2fs_sb_info *sbi,
912                 struct block_device *bdev, block_t blkstart, block_t blklen)
913 {
914 #ifdef CONFIG_BLK_DEV_ZONED
915         if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
916                         bdev_zoned_model(bdev) != BLK_ZONED_NONE)
917                 return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
918 #endif
919         return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
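For sequential zones the reset must describe exactly one zone, so the code rejects a range whose start is not zone-aligned or whose length differs from the zone size; with power-of-two zone sizes the alignment test is a single mask, as at new line 893. The check as a standalone C program (the zone size is only an example value):

#include <stdio.h>

typedef unsigned long long sector_t;

/* zone reset must start on a zone boundary and span exactly one zone;
 * mirrors the check in __f2fs_issue_discard_zone(), assuming the usual
 * power-of-two zone size */
static int zone_reset_ok(sector_t sector, sector_t nr_sects,
                         sector_t zone_sectors)
{
        if (sector & (zone_sectors - 1))        /* unaligned start */
                return 0;
        if (nr_sects != zone_sectors)           /* not exactly one zone */
                return 0;
        return 1;
}

int main(void)
{
        sector_t zone = 524288; /* example: 256 MiB zone, 512 B sectors */

        printf("%d\n", zone_reset_ok(0, 524288, zone));        /* 1 */
        printf("%d\n", zone_reset_ok(1024, 524288, zone));     /* 0 */
        printf("%d\n", zone_reset_ok(524288, 262144, zone));   /* 0 */
        return 0;
}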
920 }
921
922 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
923                 block_t blkstart, block_t blklen)
924 {
925         sector_t start = blkstart, len = 0;
926         struct block_device *bdev;
927         struct seg_entry *se;

--- 210 unchanged lines hidden ---

1067
1068         dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL);
1069         if (!dcc)
1070                 return -ENOMEM;
1071
1072         INIT_LIST_HEAD(&dcc->discard_entry_list);
1073         INIT_LIST_HEAD(&dcc->discard_cmd_list);
1074         mutex_init(&dcc->cmd_lock);
1075         atomic_set(&dcc->submit_discard, 0);
1076         dcc->nr_discards = 0;
1077         dcc->max_discards = 0;
1078
1079         init_waitqueue_head(&dcc->discard_wait_queue);
1080         SM_I(sbi)->dcc_info = dcc;
1081 init_thread:
1082         dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
1083                         "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
1138
1139         dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL);
1140         if (!dcc)
1141                 return -ENOMEM;
1142
1143         INIT_LIST_HEAD(&dcc->discard_entry_list);
1144         INIT_LIST_HEAD(&dcc->discard_cmd_list);
1145         mutex_init(&dcc->cmd_lock);
1146         atomic_set(&dcc->issued_discard, 0);
1147         atomic_set(&dcc->issing_discard, 0);
1148         dcc->nr_discards = 0;
1149         dcc->max_discards = 0;
1150
1151         init_waitqueue_head(&dcc->discard_wait_queue);
1152         SM_I(sbi)->dcc_info = dcc;
1153 init_thread:
1154         dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
1155                         "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));

--- 367 unchanged lines hidden ---

1523         memset(sum_footer, 0, sizeof(struct summary_footer));
1524         if (IS_DATASEG(type))
1525                 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
1526         if (IS_NODESEG(type))
1527                 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
1528         __set_sit_entry_type(sbi, type, curseg->segno, modified);
1529 }
1530
1531 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
1532 {
1533         if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
1534                 return 0;
1535
1536         return CURSEG_I(sbi, type)->segno;
1537 }
1538
1459 /*
1460  * Allocate a current working segment.
1461  * This function always allocates a free segment in LFS manner.
1462  */
1463 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
1464 {
1465         struct curseg_info *curseg = CURSEG_I(sbi, type);
1466         unsigned int segno = curseg->segno;
1467         int dir = ALLOC_LEFT;
1468
1469         write_sum_page(sbi, curseg->sum_blk,
1470                         GET_SUM_BLOCK(sbi, segno));
1471         if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
1472                 dir = ALLOC_RIGHT;
1473
1474         if (test_opt(sbi, NOHEAP))
1475                 dir = ALLOC_RIGHT;
1476
1477         get_new_segment(sbi, &segno, new_sec, dir);
1478         curseg->next_segno = segno;
1479         reset_curseg(sbi, type, 1);
1480         curseg->alloc_type = LFS;
1481 }
1482
1539 /*
1540  * Allocate a current working segment.
1541  * This function always allocates a free segment in LFS manner.
1542  */
1543 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
1544 {
1545         struct curseg_info *curseg = CURSEG_I(sbi, type);
1546         unsigned int segno = curseg->segno;
1547         int dir = ALLOC_LEFT;
1548
1549         write_sum_page(sbi, curseg->sum_blk,
1550                         GET_SUM_BLOCK(sbi, segno));
1551         if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
1552                 dir = ALLOC_RIGHT;
1553
1554         if (test_opt(sbi, NOHEAP))
1555                 dir = ALLOC_RIGHT;
1556
1557         segno = __get_next_segno(sbi, type);
1558         get_new_segment(sbi, &segno, new_sec, dir);
1559         curseg->next_segno = segno;
1560         reset_curseg(sbi, type, 1);
1561         curseg->alloc_type = LFS;
1562 }
1563
1564 static void __next_free_blkoff(struct f2fs_sb_info *sbi,
1565                 struct curseg_info *seg, block_t start)

--- 1555 unchanged lines hidden ---
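The only behavioral change in new_curseg() is the hint fed to get_new_segment(): with __get_next_segno(), hot data and all node logs restart the free-segment search from segment 0, while the remaining data logs keep searching onward from their current position. A toy C model of the hint selection; the type list and segment numbers are illustrative, not the real on-disk layout:

#include <stdio.h>

enum { CURSEG_HOT_DATA, CURSEG_WARM_DATA, CURSEG_COLD_DATA,
       CURSEG_HOT_NODE, CURSEG_WARM_NODE, CURSEG_COLD_NODE };

#define IS_NODESEG(t) ((t) >= CURSEG_HOT_NODE)  /* simplified */

/* current segment per log, standing in for CURSEG_I(sbi, type)->segno */
static unsigned int cur_segno[6] = { 12, 345, 678, 90, 11, 22 };

/* mirrors __get_next_segno(): where should the free-segment scan start? */
static unsigned int get_next_segno(int type)
{
        if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
                return 0;               /* always scan from the front */
        return cur_segno[type];         /* scan onward from current spot */
}

int main(void)
{
        printf("hot data:  %u\n", get_next_segno(CURSEG_HOT_DATA));  /* 0 */
        printf("warm data: %u\n", get_next_segno(CURSEG_WARM_DATA)); /* 345 */
        printf("cold node: %u\n", get_next_segno(CURSEG_COLD_NODE)); /* 0 */
        return 0;
}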