/*
 * block/blk-merge.c (xref revision 5cb8850c)
 *
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	/*
	 * This should probably be returning 0, but blk_add_request_payload()
	 * can attach a payload to a discard bio, so count such bios as a
	 * single segment for now.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		return 1;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return 1;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * The trick here is making sure that a high page is
			 * never considered part of another segment, since
			 * that might change with the bounce page.
			 */
			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
			if (!high && !highprv && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv.bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
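
/*
 * Editor's illustrative sketch (not upstream code): the merge helpers
 * later in this file follow a lazy revalidation pattern -- a bio's
 * segment count is only recomputed when BIO_SEG_VALID is not set.
 * A hypothetical helper capturing that pattern:
 */
static inline void example_ensure_seg_valid(struct request_queue *q,
					    struct bio *bio)
{
	/* recount only if the cached value may be stale */
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
}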

static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;
	struct bvec_iter iter;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_for_each_segment(end_bv, bio, iter)
		if (end_bv.bv_len == iter.bi_size)
			break;

	nxt_bv = bio_iovec(nxt);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}
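
/*
 * For reference, a sketch of the two predicates used above (the real
 * definitions live in <linux/bio.h> at this revision, modulo
 * arch-specific overrides): BIOVEC_PHYS_MERGEABLE(a, b) is essentially
 *
 *	bvec_to_phys(a) + a->bv_len == bvec_to_phys(b)
 *
 * i.e. the two vectors are physically contiguous, while
 * BIOVEC_SEG_BOUNDARY(q, a, b) additionally checks that the combined
 * span does not cross the queue's segment boundary mask.
 */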
130d6d48196SJens Axboe 
1317988613bSKent Overstreet static inline void
132963ab9e5SAsias He __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
1337988613bSKent Overstreet 		     struct scatterlist *sglist, struct bio_vec *bvprv,
134963ab9e5SAsias He 		     struct scatterlist **sg, int *nsegs, int *cluster)
135963ab9e5SAsias He {
136963ab9e5SAsias He 
137963ab9e5SAsias He 	int nbytes = bvec->bv_len;
138963ab9e5SAsias He 
1397988613bSKent Overstreet 	if (*sg && *cluster) {
140963ab9e5SAsias He 		if ((*sg)->length + nbytes > queue_max_segment_size(q))
141963ab9e5SAsias He 			goto new_segment;
142963ab9e5SAsias He 
1437988613bSKent Overstreet 		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
144963ab9e5SAsias He 			goto new_segment;
1457988613bSKent Overstreet 		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
146963ab9e5SAsias He 			goto new_segment;
147963ab9e5SAsias He 
148963ab9e5SAsias He 		(*sg)->length += nbytes;
149963ab9e5SAsias He 	} else {
150963ab9e5SAsias He new_segment:
151963ab9e5SAsias He 		if (!*sg)
152963ab9e5SAsias He 			*sg = sglist;
153963ab9e5SAsias He 		else {
154963ab9e5SAsias He 			/*
155963ab9e5SAsias He 			 * If the driver previously mapped a shorter
156963ab9e5SAsias He 			 * list, we could see a termination bit
157963ab9e5SAsias He 			 * prematurely unless it fully inits the sg
158963ab9e5SAsias He 			 * table on each mapping. We KNOW that there
159963ab9e5SAsias He 			 * must be more entries here or the driver
160963ab9e5SAsias He 			 * would be buggy, so force clear the
161963ab9e5SAsias He 			 * termination bit to avoid doing a full
162963ab9e5SAsias He 			 * sg_init_table() in drivers for each command.
163963ab9e5SAsias He 			 */
164c8164d89SPaolo Bonzini 			sg_unmark_end(*sg);
165963ab9e5SAsias He 			*sg = sg_next(*sg);
166963ab9e5SAsias He 		}
167963ab9e5SAsias He 
168963ab9e5SAsias He 		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
169963ab9e5SAsias He 		(*nsegs)++;
170963ab9e5SAsias He 	}
1717988613bSKent Overstreet 	*bvprv = *bvec;
172963ab9e5SAsias He }
173963ab9e5SAsias He 
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	if (bio->bi_rw & REQ_DISCARD) {
		/*
		 * This is a hack - drivers should neither modify the
		 * biovec nor rely on bi_vcnt - but because of
		 * blk_add_request_payload(), a discard bio may or may not
		 * have a payload we need to set up here, and bi_vcnt is
		 * really the only way of telling if we do.
		 */

		if (bio->bi_vcnt)
			goto single_segment;

		return 0;
	}

	if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
		*sg = sglist;
		bvec = bio_iovec(bio);
		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
		return 1;
	}

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * Map a request to a scatterlist; return the number of sg entries set up.
 * The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		/* clear the end marker so the drain entry can be appended */
		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
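
/*
 * Typical driver usage -- a minimal sketch, not from this file, assuming
 * a hypothetical driver-private "dev" with a preallocated sg table of at
 * least queue_max_segments(q) entries and a DMA-capable struct device:
 *
 *	sg_init_table(dev->sg_table, queue_max_segments(q));
 *	nents = blk_rq_map_sg(q, rq, dev->sg_table);
 *	if (nents)
 *		nents = dma_map_sg(dev->dma_dev, dev->sg_table, nents,
 *				   rq_data_dir(rq) ? DMA_TO_DEVICE :
 *						     DMA_FROM_DEVICE);
 */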

/**
 * blk_bio_map_sg - map a bio to a scatterlist
 * @q: request_queue in question
 * @bio: bio being mapped
 * @sglist: scatterlist being mapped
 *
 * Note:
 *    Caller must make sure sg can hold bio->bi_phys_segments entries
 *
 * Will return the number of sg entries set up
 */
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
		   struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	struct bio *next = bio->bi_next;
	int nsegs;

	bio->bi_next = NULL;
	nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
	bio->bi_next = next;
	if (sg)
		sg_mark_end(sg);

	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
	return nsegs;
}
EXPORT_SYMBOL(blk_bio_map_sg);

static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump the phys
	 * segment counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload;
 * there it does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued request.
	 * We can't merge them if so.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}
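
/*
 * For reference, a sketch of the consumer side (the partial-completion
 * path in blk-core.c at this era, quoted from memory): once a request
 * is marked mixed, the remaining failfast attributes follow the first
 * bio:
 *
 *	if (req->cmd_flags & REQ_MIXED_MERGE) {
 *		req->cmd_flags &= ~REQ_FAILFAST_MASK;
 *		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
 *	}
 */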

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			  struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts; update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or either of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}

int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
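
/*
 * Editor's illustrative sketch (not upstream code): how the two helpers
 * above combine in a caller -- check mergeability first, then ask where
 * the bio would attach.
 */
static inline int example_classify_merge(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return ELEVATOR_NO_MERGE;
	return blk_try_merge(rq, bio);
}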