/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	/*
	 * This should probably be returning 0, but blk_add_request_payload()
	 * (Christoph!!!!)
	 */
	if (bio->bi_rw & REQ_DISCARD)
		return 1;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return 1;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	high = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			/*
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since
			 * that might change with the bounce page.
			 */
			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
			if (!high && !highprv && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
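			/*
			 * Reached via goto when SG merging is disabled or
			 * one of the limits above tripped, or by fall-through
			 * for high pages and non-clustering queues: the
			 * current vector starts a new physical segment.
			 */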
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv.bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
	    (seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
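
/*
 * Check whether the last segment of @bio and the first segment of @nxt
 * are physically contiguous and together fit the queue's segment size
 * limit, so the two bios could share one physical segment if their
 * requests are merged.
 */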
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;
	struct bvec_iter iter;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_for_each_segment(end_bv, bio, iter)
		if (end_bv.bv_len == iter.bi_size)
			break;

	nxt_bv = bio_iovec(nxt);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}
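
/*
 * Add one bio_vec to the scatterlist, either by extending the current
 * sg entry when the queue's clustering rules allow it or by starting a
 * new entry.
 */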
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	if (bio->bi_rw & REQ_DISCARD) {
		/*
		 * This is a hack - drivers should be neither modifying the
		 * biovec, nor relying on bi_vcnt - but because of
		 * blk_add_request_payload(), a discard bio may or may not have
		 * a payload we need to set up here (thank you Christoph) and
		 * bi_vcnt is really the only way of telling if we need to.
		 */

		if (bio->bi_vcnt)
			goto single_segment;

		return 0;
	}

	if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
		*sg = sglist;
		bvec = bio_iovec(bio);
		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
		return 1;
	}

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}
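
/*
 * A typical caller (a sketch, not taken from this file) sizes the
 * scatterlist by queue_max_segments(), initializes it once, then maps
 * each request and hands the result to the DMA API; "sgl", "dev" and
 * "dir" here stand for whatever the driver already has at hand:
 *
 *	sg_init_table(sgl, queue_max_segments(q));
 *	nents = blk_rq_map_sg(q, rq, sgl);
 *	if (nents)
 *		dma_map_sg(dev, sgl, nents, dir);
 */
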
/*
 * Map a request to a scatterlist; returns the number of sg entries set
 * up. The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment. Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}
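
/*
 * ll_back_merge_fn() and ll_front_merge_fn() below check whether @bio
 * can be appended to or prepended to @req without exceeding the
 * request's sector limit or the queue's segment limits.
 */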
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload;
 * it does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

static int req_gap_to_prev(struct request *req, struct request *next)
{
	struct bio *prev = req->biotail;

	return bvec_gap_to_prev(&prev->bi_io_vec[prev->bi_vcnt - 1],
				next->bio->bi_io_vec[0].bv_offset);
}
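
/*
 * Check whether @req and @next can be combined: neither may carry a
 * prepared command, the total sector count and physical segment count
 * must stay within the queue limits, and any integrity payloads must
 * merge.
 */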
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued request.
	 * Can't merge them if they are.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags) &&
	    req_gap_to_prev(req, next))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged. Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios. It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}
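
/*
 * One of the two merged requests disappears, so drop its in-flight
 * accounting here.
 */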
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}
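
/*
 * attempt_merge() below treats two requests as contiguous when the
 * first ends exactly where the second begins, i.e.
 * blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next): a request
 * covering sectors 0-7 can absorb one starting at sector 8, but not
 * one starting at sector 9.
 */
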
/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or either of the two is already
	 * a mixed merge, mark both as mixed before proceeding. This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}
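
/*
 * Check whether @bio may be merged into @rq at all; blk_try_merge()
 * below then reports whether it would be a back or a front merge.
 */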
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/* Only check gaps if the bio carries data */
	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) && bio_has_data(bio)) {
		struct bio_vec *bprev;

		bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
			return false;
	}

	return true;
}

int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
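
/*
 * Callers outside this file (e.g. the plug merge path in blk-core.c)
 * pair the two checks roughly like this sketch:
 *
 *	if (blk_rq_merge_ok(rq, bio)) {
 *		switch (blk_try_merge(rq, bio)) {
 *		case ELEVATOR_BACK_MERGE:
 *			merged = bio_attempt_back_merge(q, rq, bio);
 *			break;
 *		case ELEVATOR_FRONT_MERGE:
 *			merged = bio_attempt_front_merge(q, rq, bio);
 *			break;
 *		}
 *	}
 */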