--- blk-merge.c (6ab9e09238fdfd742fe23b81e2d385a1cab49d9b)
+++ blk-merge.c (69840466086d2248898020a08dda52732686c4e6)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Functions related to segment and merge handling
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>

--- 700 unchanged lines hidden ---

                part_round_stats(req->q, cpu, part);
                part_dec_in_flight(req->q, part, rq_data_dir(req));

                hd_struct_put(part);
                part_stat_unlock();
        }
 }
+/*
+ * Two cases of handling DISCARD merge:
+ * If max_discard_segments > 1, the driver takes every bio as a
+ * range and sends them to the controller together. The ranges
+ * need not be contiguous.
+ * Otherwise, the bios/requests are handled the same as others,
+ * which must be contiguous.
+ */
+static inline bool blk_discard_mergable(struct request *req)
+{
+        if (req_op(req) == REQ_OP_DISCARD &&
+            queue_max_discard_segments(req->q) > 1)
+                return true;
+        return false;
+}

+enum elv_merge blk_try_req_merge(struct request *req, struct request *next)
+{
+        if (blk_discard_mergable(req))
+                return ELEVATOR_DISCARD_MERGE;
+        else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
+                return ELEVATOR_BACK_MERGE;
+
+        return ELEVATOR_NO_MERGE;
+}
+
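These two added helpers are the heart of the change: blk_discard_mergable() asks "may these requests merge as discontiguous discard ranges?", and blk_try_req_merge() turns the answer into an elevator merge decision. The following user-space sketch models only that decision logic; struct fake_req and its fields are invented stand-ins for what the kernel code actually reads (req_op(), queue_max_discard_segments(), blk_rq_pos(), blk_rq_sectors()), so treat it as an illustration, not kernel code.

/* Sketch only: user-space model of blk_try_req_merge(). */
#include <stdbool.h>
#include <stdio.h>

enum elv_merge { ELEVATOR_NO_MERGE, ELEVATOR_BACK_MERGE, ELEVATOR_DISCARD_MERGE };

/* Invented stand-in for the handful of request fields consulted above. */
struct fake_req {
        bool is_discard;                 /* req_op(req) == REQ_OP_DISCARD */
        unsigned int max_discard_segs;   /* queue_max_discard_segments(req->q) */
        unsigned long long pos, sectors; /* blk_rq_pos(), blk_rq_sectors() */
};

static enum elv_merge try_req_merge(const struct fake_req *req,
                                    const struct fake_req *next)
{
        /* Multi-range discard: each request is its own range, so the
         * two need not be contiguous on disk. */
        if (req->is_discard && req->max_discard_segs > 1)
                return ELEVATOR_DISCARD_MERGE;
        /* Everything else (including single-segment discards) must be
         * back-to-back: req ends exactly where next begins. */
        if (req->pos + req->sectors == next->pos)
                return ELEVATOR_BACK_MERGE;
        return ELEVATOR_NO_MERGE;
}

int main(void)
{
        struct fake_req a = { true, 8, 0, 8 };    /* discard, sectors 0..7 */
        struct fake_req b = { true, 8, 1024, 8 }; /* discard, far away */

        /* Prints 2 (ELEVATOR_DISCARD_MERGE) despite the large gap. */
        printf("%d\n", try_req_merge(&a, &b));
        return 0;
}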
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
  * For mq with scheduling, the appropriate queue wide lock should be held.
  */
 static struct request *attempt_merge(struct request_queue *q,
                                      struct request *req, struct request *next)
 {
        if (!q->mq_ops)
                lockdep_assert_held(q->queue_lock);

        if (!rq_mergeable(req) || !rq_mergeable(next))
                return NULL;

        if (req_op(req) != req_op(next))
                return NULL;

-        /*
-         * not contiguous
-         */
-        if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
-                return NULL;
-
        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk
            || req_no_special_merge(next))
                return NULL;

        if (req_op(req) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return NULL;

--- 7 unchanged lines hidden ---

        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
         * will have updated segment counts, update sector
         * counts here. Handle DISCARDs separately, as they
         * have separate settings.
         */
-        if (req_op(req) == REQ_OP_DISCARD) {
+
+        switch (blk_try_req_merge(req, next)) {
+        case ELEVATOR_DISCARD_MERGE:
                 if (!req_attempt_discard_merge(q, req, next))
                         return NULL;
-        } else if (!ll_merge_requests_fn(q, req, next))
+                break;
+        case ELEVATOR_BACK_MERGE:
+                if (!ll_merge_requests_fn(q, req, next))
+                        return NULL;
+                break;
+        default:
                 return NULL;
+        }
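This switch is the behavioral fix. The old code (see the removed hunk above) rejected any non-contiguous pair before the op-specific checks, which made multi-range DISCARD merging impossible; and it routed every remaining DISCARD to req_attempt_discard_merge(), which refuses to merge once the segment count reaches queue_max_discard_segments(), so on max_discard_segments == 1 queues even contiguous discards never merged. The sketch below condenses both versions into hypothetical old_attempt_merge()/new_attempt_merge() helpers, reusing the invented fake_req model from the earlier example, with discard_merge() and back_merge() as one-line stand-ins for req_attempt_discard_merge() and ll_merge_requests_fn().

#include <stdbool.h>
#include <stdio.h>

struct fake_req {                        /* invented stand-in, as before */
        bool is_discard;
        unsigned int max_discard_segs;
        unsigned long long pos, sectors;
};

/* Stand-in for req_attempt_discard_merge(): multi-range queues only. */
static bool discard_merge(const struct fake_req *req)
{
        return req->max_discard_segs > 1;
}

/* Stand-in for ll_merge_requests_fn(): ordinary back merge. */
static bool back_merge(const struct fake_req *req, const struct fake_req *next)
{
        return req->pos + req->sectors == next->pos;
}

static bool old_attempt_merge(const struct fake_req *req,
                              const struct fake_req *next)
{
        if (req->pos + req->sectors != next->pos)  /* early contiguity check */
                return false;
        if (req->is_discard)
                return discard_merge(req);         /* always fails when max == 1 */
        return back_merge(req, next);
}

static bool new_attempt_merge(const struct fake_req *req,
                              const struct fake_req *next)
{
        if (req->is_discard && req->max_discard_segs > 1)
                return discard_merge(req);  /* ranges may be discontiguous */
        return back_merge(req, next);       /* single-segment discards too */
}

int main(void)
{
        /* Two contiguous discards on a queue with max_discard_segments == 1. */
        struct fake_req a = { true, 1, 0, 8 }, b = { true, 1, 8, 8 };

        printf("old: %d, new: %d\n",
               old_attempt_merge(&a, &b),   /* 0: never merged */
               new_attempt_merge(&a, &b));  /* 1: merges as a back merge */
        return 0;
}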

        /*
         * If failfast settings disagree or any of the two is already
         * a mixed merge, mark both as mixed before proceeding. This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||

--- 107 unchanged lines hidden ---

        if (rq->write_hint != bio->bi_write_hint)
                return false;

        return true;
 }

 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
 {
-        if (req_op(rq) == REQ_OP_DISCARD &&
-            queue_max_discard_segments(rq->q) > 1)
+        if (blk_discard_mergable(rq))
                 return ELEVATOR_DISCARD_MERGE;
         else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                 return ELEVATOR_BACK_MERGE;
         else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                 return ELEVATOR_FRONT_MERGE;
         return ELEVATOR_NO_MERGE;
 }
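The bio-into-request path now makes the same decision through the shared helper: blk_try_merge() calls blk_discard_mergable() instead of open-coding the op and segment-count test, so the bio-merge and request-merge policies cannot drift apart. The remaining branches are plain sector arithmetic; a sketch with hypothetical numbers:

/* Hypothetical numbers only: the two position tests in blk_try_merge(). */
#include <stdio.h>

int main(void)
{
        unsigned long long rq_pos = 100, rq_sectors = 8;   /* request covers 100..107 */
        unsigned long long bio_sector = 108, bio_len = 8;  /* bio covers 108..115 */

        /* Back merge: the bio starts exactly where the request ends. */
        printf("back merge:  %d\n", rq_pos + rq_sectors == bio_sector);  /* 1 */

        bio_sector = 92;  /* bio covers 92..99 */
        /* Front merge: the bio ends exactly where the request starts. */
        printf("front merge: %d\n", rq_pos - bio_len == bio_sector);     /* 1 */
        return 0;
}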