--- blk-merge.c	(92243b6fc8fcb16bf401b055f7a0ba79f70a4115)
+++ blk-merge.c	(52cc6eead9095e2faf2ec7afc013aa3af1f01ac5)
 /*
  * Functions related to segment and merge handling
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/scatterlist.h>

--- 52 unchanged lines hidden ---

 	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
 }

 static struct bio *blk_bio_segment_split(struct request_queue *q,
 					 struct bio *bio,
 					 struct bio_set *bs)
 {
-	struct bio *split;
-	struct bio_vec bv, bvprv;
+	struct bio_vec bv, bvprv, *bvprvp = NULL;
 	struct bvec_iter iter;
 	unsigned seg_size = 0, nsegs = 0, sectors = 0;
-	int prev = 0;

 	bio_for_each_segment(bv, bio, iter) {
-		sectors += bv.bv_len >> 9;
-
-		if (sectors > queue_max_sectors(q))
+		if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
 			goto split;

 		/*
 		 * If the queue doesn't support SG gaps and adding this
 		 * offset would create a gap, disallow it.
 		 */
-		if (prev && bvec_gap_to_prev(q, &bvprv, bv.bv_offset))
+		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
 			goto split;

-		if (prev && blk_queue_cluster(q)) {
+		if (bvprvp && blk_queue_cluster(q)) {
 			if (seg_size + bv.bv_len > queue_max_segment_size(q))
 				goto new_segment;
-			if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
+			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
 				goto new_segment;
-			if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
+			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
 				goto new_segment;

 			seg_size += bv.bv_len;
 			bvprv = bv;
-			prev = 1;
+			bvprvp = &bv;
+			sectors += bv.bv_len >> 9;
 			continue;
 		}
 new_segment:
 		if (nsegs == queue_max_segments(q))
 			goto split;

 		nsegs++;
 		bvprv = bv;
-		prev = 1;
+		bvprvp = &bv;
 		seg_size = bv.bv_len;
+		sectors += bv.bv_len >> 9;
 	}

 	return NULL;
 split:
-	split = bio_clone_bioset(bio, GFP_NOIO, bs);
-
-	split->bi_iter.bi_size -= iter.bi_size;
-	bio->bi_iter = iter;
-
-	if (bio_integrity(bio)) {
-		bio_integrity_advance(bio, split->bi_iter.bi_size);
-		bio_integrity_trim(split, 0, bio_sectors(split));
-	}
-
-	return split;
+	return bio_split(bio, sectors, GFP_NOIO, bs);
 }
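
Two things change in blk_bio_segment_split() above. The `prev` flag plus the on-stack copy `bvprv` become a pointer, `bvprvp`, and `sectors` is now tested against queue_max_sectors(q) before a vector is accepted rather than being bumped first, so whenever control reaches the `split:` label, `sectors` holds exactly the amount that passed every check. That in turn lets the split path shrink from a full bio_clone_bioset() clone, trimmed by hand with matching integrity fix-ups, to a single fast-clone call, bio_split(bio, sectors, GFP_NOIO, bs). The sketch below is plain userspace C, not kernel code, and the vector sizes are invented; it only illustrates why test-then-accumulate matters once the accumulator doubles as the split point.

	/*
	 * Standalone sketch: with the old order the accumulator includes
	 * the vector that blew the limit, so it cannot name the split
	 * point; the new order leaves it at the accepted size.
	 */
	#include <stdio.h>

	int main(void)
	{
		const unsigned max_sectors = 8;
		const unsigned vec[] = { 4, 4, 4 };	/* sectors per bio_vec */
		unsigned old_acc = 0, new_acc = 0;
		unsigned i;

		for (i = 0; i < 3; i++) {
			old_acc += vec[i];		/* old: accumulate first... */
			if (old_acc > max_sectors)	/* ...then test */
				break;
		}
		for (i = 0; i < 3; i++) {
			if (new_acc + vec[i] > max_sectors)	/* new: test first... */
				break;
			new_acc += vec[i];		/* ...then accumulate */
		}
		/* prints: old=12 new=8; only new_acc is a usable split point */
		printf("old=%u new=%u\n", old_acc, new_acc);
		return 0;
	}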

 void blk_queue_split(struct request_queue *q, struct bio **bio,
 		     struct bio_set *bs)
 {
 	struct bio *split;

 	if ((*bio)->bi_rw & REQ_DISCARD)

--- 302 unchanged lines hidden ---

 	if (req == q->last_merge)
 		q->last_merge = NULL;
 	return 0;
 }

 int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		     struct bio *bio)
 {
+	if (req_gap_back_merge(req, bio))
+		return 0;
+	if (blk_integrity_rq(req) &&
+	    integrity_req_gap_back_merge(req, bio))
+		return 0;
 	if (blk_rq_sectors(req) + bio_sectors(bio) >
 	    blk_rq_get_max_sectors(req)) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
 	}
 	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
 		blk_recount_segments(q, req->biotail);
 	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);

 	return ll_new_hw_segment(q, req, bio);
 }

 int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio)
 {
+
+	if (req_gap_front_merge(req, bio))
+		return 0;
+	if (blk_integrity_rq(req) &&
+	    integrity_req_gap_front_merge(req, bio))
+		return 0;
 	if (blk_rq_sectors(req) + bio_sectors(bio) >
 	    blk_rq_get_max_sectors(req)) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
 	}
 	if (!bio_flagged(bio, BIO_SEG_VALID))

--- 10 unchanged lines hidden ---

  */
 static bool req_no_special_merge(struct request *req)
 {
 	struct request_queue *q = req->q;

 	return !q->mq_ops && req->special;
 }

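The checks added to ll_back_merge_fn() and ll_front_merge_fn() above call helpers that, at the new revision, live in include/linux/blkdev.h rather than in this file. Reconstructed from that kernel series (a sketch, so treat the bodies as approximate rather than a quote from this diff), they look roughly like:

	static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
					struct bio *next)
	{
		/* bios without data cannot create a gap */
		if (!bio_has_data(prev))
			return false;

		/* would appending next after prev leave a virt-boundary gap? */
		return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
					next->bi_io_vec[0].bv_offset);
	}

	static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
	{
		return bio_will_gap(req->q, req->biotail, bio);	/* bio goes after biotail */
	}

	static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
	{
		return bio_will_gap(req->q, bio, req->bio);	/* bio goes before req->bio */
	}

The same helpers explain the next hunk: the file-local req_gap_to_prev() open-coded the back-merge direction of bio_will_gap(), so it goes away in favor of the shared req_gap_back_merge().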
-static int req_gap_to_prev(struct request *req, struct bio *next)
-{
-	struct bio *prev = req->biotail;
-
-	return bvec_gap_to_prev(req->q, &prev->bi_io_vec[prev->bi_vcnt - 1],
-				next->bi_io_vec[0].bv_offset);
-}
-
 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 				struct request *next)
 {
 	int total_phys_segments;
 	unsigned int seg_size =
 		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

 	/*
 	 * First check if the either of the requests are re-queued
 	 * requests. Can't merge them if they are.
 	 */
 	if (req_no_special_merge(req) || req_no_special_merge(next))
 		return 0;

-	if (req_gap_to_prev(req, next->bio))
+	if (req_gap_back_merge(req, next->bio))
 		return 0;

 	/*
 	 * Will it become too large?
 	 */
 	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
 	    blk_rq_get_max_sectors(req))
 		return 0;

--- 191 unchanged lines hidden ---

 	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
 		return false;

 	/* must be using the same buffer */
 	if (rq->cmd_flags & REQ_WRITE_SAME &&
 	    !blk_write_same_mergeable(rq->bio, bio))
 		return false;

-	/* Only check gaps if the bio carries data */
-	if (bio_has_data(bio) && req_gap_to_prev(rq, bio))
-		return false;
-
 	return true;
 }

 int blk_try_merge(struct request *rq, struct bio *bio)
 {
 	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
 		return ELEVATOR_BACK_MERGE;
 	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
 		return ELEVATOR_FRONT_MERGE;
 	return ELEVATOR_NO_MERGE;
 }
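
Finally, blk_rq_merge_ok() can drop its open-coded gap test: the bio_has_data() guard behind the old "Only check gaps if the bio carries data" comment sits inside bio_will_gap() itself in the sketch above, so callers get it for free. Every gap check in this file ultimately funnels into bvec_gap_to_prev(), which in this series is a virt_boundary test along the following lines (again reconstructed as a sketch from the surrounding kernel series, not quoted from this diff):

	static inline bool bvec_gap_to_prev(struct request_queue *q,
					    struct bio_vec *bprv, unsigned int offset)
	{
		/* queues with no virt boundary never see gaps */
		if (!queue_virt_boundary(q))
			return false;

		/*
		 * A gap exists if the next vector does not start at offset 0,
		 * or if the previous one does not end on the boundary.  For
		 * example, with a virt_boundary of PAGE_SIZE - 1 (as NVMe
		 * sets), a previous vector ending 0x800 bytes into a 4K page
		 * gives (0x800 & 0xfff) != 0: a gap, so no merge.
		 */
		return offset ||
			((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
	}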