/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	/*
	 * This should probably be returning 0, but blk_add_request_payload()
	 * means a discard bio may end up carrying a payload (Christoph!!!!)
	 */
	if (bio->bi_rw & REQ_DISCARD)
		return 1;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return 1;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	high = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			/*
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since
			 * that might change with the bounce page.
			 */
			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
			if (!high && !highprv && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv.bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}
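
/*
 * Illustrative sketch (comment only, not compiled): the walk above folds
 * physically adjacent biovecs into one segment when the queue allows
 * clustering.  Assuming a 64k max segment size, a bio carrying two
 * contiguous 4k pages counts as a single 8k segment, while the same pages
 * with a gap between them count as two:
 *
 *	[ page N | page N+1 ]  ->  nr_phys_segs = 1 (merged, 8k)
 *	[ page N | page X   ]  ->  nr_phys_segs = 2 (not adjacent)
 */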

void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
			bio->bi_vcnt < queue_max_segments(q))
		bio->bi_phys_segments = bio->bi_vcnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
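
/*
 * Illustrative sketch: bi_phys_segments is a cached value guarded by
 * BIO_SEG_VALID, so callers recount lazily (this is exactly the pattern
 * the ll_*_merge_fn helpers below use):
 *
 *	if (!bio_flagged(bio, BIO_SEG_VALID))
 *		blk_recount_segments(q, bio);
 *	segs = bio->bi_phys_segments;
 */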

static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;
	struct bvec_iter iter;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_for_each_segment(end_bv, bio, iter)
		if (end_bv.bv_len == iter.bi_size)
			break;

	nxt_bv = bio_iovec(nxt);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}
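
/*
 * Illustrative sketch: blk_phys_contig_segment() asks whether the tail
 * segment of @bio and the head segment of @nxt could fuse into one.
 * bi_seg_back_size/bi_seg_front_size bound the combined size up front,
 * e.g. with queue_max_segment_size(q) == 64k:
 *
 *	bio tail segment: 60k
 *	nxt head segment:  8k   ->  60k + 8k > 64k, keep two segments
 */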

static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	if (bio->bi_rw & REQ_DISCARD) {
		/*
		 * This is a hack - drivers should be neither modifying the
		 * biovec, nor relying on bi_vcnt - but because of
		 * blk_add_request_payload(), a discard bio may or may not have
		 * a payload we need to set up here (thank you Christoph) and
		 * bi_vcnt is really the only way of telling if we need to.
		 */

		if (bio->bi_vcnt)
			goto single_segment;

		return 0;
	}

	if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
		*sg = sglist;
		bvec = bio_iovec(bio);
		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
		return 1;
	}

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}
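
/*
 * Illustrative sketch: a biovec is folded into the current sg entry only
 * when all three clustering conditions hold, otherwise a fresh entry is
 * started.  In shorthand:
 *
 *	fits  = sg->length + bv.bv_len <= queue_max_segment_size(q)
 *	adjac = BIOVEC_PHYS_MERGEABLE(prev, bv)
 *	bound = BIOVEC_SEG_BOUNDARY(q, prev, bv)
 *	fold  = fits && adjac && bound
 */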

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
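
/*
 * Illustrative sketch of typical driver usage (MY_MAX_SEGS and the
 * surrounding names are hypothetical):
 *
 *	struct scatterlist sgl[MY_MAX_SEGS];
 *	int nents;
 *
 *	sg_init_table(sgl, MY_MAX_SEGS);
 *	nents = blk_rq_map_sg(q, rq, sgl);
 *	nents = dma_map_sg(dev, sgl, nents, rq_data_dir(rq) ?
 *			   DMA_TO_DEVICE : DMA_FROM_DEVICE);
 *
 * and then program the controller with sgl[0..nents).  The table must
 * hold at least rq->nr_phys_segments entries; drivers that reuse one
 * table across commands rely on the sg_unmark_end() handling in
 * __blk_segment_map_sg() above.
 */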

/**
 * blk_bio_map_sg - map a bio to a scatterlist
 * @q: request_queue in question
 * @bio: bio being mapped
 * @sglist: scatterlist being mapped
 *
 * Note:
 *    Caller must make sure sg can hold bio->bi_phys_segments entries
 *
 * Will return the number of sg entries setup
 */
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
		   struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs;
	struct bio *next = bio->bi_next;

	bio->bi_next = NULL;
	nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
	bio->bi_next = next;
	if (sg)
		sg_mark_end(sg);

	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
	return nsegs;
}
EXPORT_SYMBOL(blk_bio_map_sg);
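
/*
 * Illustrative sketch: this maps a single bio rather than a whole
 * request, which suits drivers that issue one command per bio:
 *
 *	sg_init_table(sgl, queue_max_segments(q));
 *	nents = blk_bio_map_sg(q, bio, sgl);
 *
 * bi_next is cleared and restored above so that __blk_bios_map_sg()
 * does not walk into any chained bios.
 */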

static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}
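
/*
 * Illustrative sketch: ll_back_merge_fn() appends @bio behind @req and
 * ll_front_merge_fn() prepends it, so in sector terms:
 *
 *	back : blk_rq_pos(req) + blk_rq_sectors(req) == bio's start sector
 *	front: bio's start sector + bio_sectors(bio) == blk_rq_pos(req)
 *
 * Both then funnel into ll_new_hw_segment() for the segment-count and
 * integrity checks.
 */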

/*
 * blk-mq uses req->special to carry normal driver per-request payload;
 * it does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests are re-queued
	 * requests.  Can't merge them if they are.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}
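
/*
 * Illustrative sketch: attempt_merge() below calls this on both requests
 * before splicing their bio lists, so every bio is first stamped with its
 * own request's failfast bits:
 *
 *	req  (REQ_FAILFAST_DEV set)  ->  its bios get REQ_FAILFAST_DEV
 *	next (no failfast bits)      ->  its bios stay unchanged
 *
 * Completion can then honour per-bio attributes even though the merged
 * request only tracks the first bio's flags.
 */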

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts; update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}
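
/*
 * Illustrative sketch: attempt_merge() only fuses requests that abut on
 * disk.  For example:
 *
 *	req : blk_rq_pos = 0, blk_rq_sectors = 8  (sectors 0..7)
 *	next: blk_rq_pos = 8                      (sectors 8..15)
 *
 * 0 + 8 == 8, so the pair passes the contiguity check and 'next' may be
 * appended to 'req'.
 */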

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
		struct bio_vec *bprev;

		bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
			return false;
	}

	return true;
}

int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
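
/*
 * Illustrative sketch of blk_try_merge()'s sector arithmetic for a
 * request covering sectors 100..107 (blk_rq_pos = 100, sectors = 8):
 *
 *	bio starting at sector 108   ->  ELEVATOR_BACK_MERGE  (100 + 8)
 *	8-sector bio starting at 92  ->  ELEVATOR_FRONT_MERGE (100 - 8)
 *	anything else                ->  ELEVATOR_NO_MERGE
 */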