xref: /openbmc/linux/block/blk-merge.c (revision f31dc1cd)
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	struct bio_vec *bv, *bvprv = NULL;
	int cluster, i, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;

	if (!bio)
		return 0;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, i) {
			/*
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since that
			 * might change with the bounce page.
			 */
			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
			if (high || highprv)
				goto new_segment;
			if (cluster) {
				if (seg_size + bv->bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
					goto new_segment;

				seg_size += bv->bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv->bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}
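
/*
 * Illustrative example (editorial sketch, not in the original file):
 * with clustering enabled and queue_max_segment_size() == 4KiB, two
 * physically adjacent 2KiB bvecs coalesce into one 4KiB physical
 * segment, while a third adjacent 2KiB bvec would overflow the limit
 * and hit new_segment, so the walk above reports nr_phys_segs == 2.
 */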

void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}
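
/*
 * Note: bi_seg_front_size and bi_seg_back_size cache the sizes of a
 * bio's first and last physical segments (maintained by
 * __blk_recalc_rq_segments() above), so the size check in
 * blk_phys_contig_segment() can reject an oversized combined segment
 * without re-walking either bio.
 */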

static void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec **bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*bvprv && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			(*sg)->page_link &= ~0x02;
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = bvec;
}

/*
 * Map a request to a scatterlist and return the number of sg entries
 * set up.  The caller must make sure sg can hold rq->nr_phys_segments
 * entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct req_iterator iter;
	struct scatterlist *sg;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	sg = NULL;
	rq_for_each_segment(bvec, rq, iter) {
		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
				     &nsegs, &cluster);
	} /* segments in rq */

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
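
/*
 * Example usage (a minimal sketch; example_driver_map() is a
 * hypothetical helper, not part of this file): a driver sizes its
 * table from rq->nr_phys_segments, initializes it, and then maps,
 * much as scsi_init_sgtable() does.
 */
static inline int example_driver_map(struct request *rq,
				     struct scatterlist *sgl)
{
	int count;

	sg_init_table(sgl, rq->nr_phys_segments);
	count = blk_rq_map_sg(rq->q, rq, sgl);
	/* count never exceeds rq->nr_phys_segments */
	return count;
}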
211d6d48196SJens Axboe 
21285b9f66aSAsias He /**
21385b9f66aSAsias He  * blk_bio_map_sg - map a bio to a scatterlist
21485b9f66aSAsias He  * @q: request_queue in question
21585b9f66aSAsias He  * @bio: bio being mapped
21685b9f66aSAsias He  * @sglist: scatterlist being mapped
21785b9f66aSAsias He  *
21885b9f66aSAsias He  * Note:
21985b9f66aSAsias He  *    Caller must make sure sg can hold bio->bi_phys_segments entries
22085b9f66aSAsias He  *
22185b9f66aSAsias He  * Will return the number of sg entries setup
22285b9f66aSAsias He  */
22385b9f66aSAsias He int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
22485b9f66aSAsias He 		   struct scatterlist *sglist)
22585b9f66aSAsias He {
22685b9f66aSAsias He 	struct bio_vec *bvec, *bvprv;
22785b9f66aSAsias He 	struct scatterlist *sg;
22885b9f66aSAsias He 	int nsegs, cluster;
22985b9f66aSAsias He 	unsigned long i;
23085b9f66aSAsias He 
23185b9f66aSAsias He 	nsegs = 0;
23285b9f66aSAsias He 	cluster = blk_queue_cluster(q);
23385b9f66aSAsias He 
23485b9f66aSAsias He 	bvprv = NULL;
23585b9f66aSAsias He 	sg = NULL;
23685b9f66aSAsias He 	bio_for_each_segment(bvec, bio, i) {
23785b9f66aSAsias He 		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
23885b9f66aSAsias He 				     &nsegs, &cluster);
23985b9f66aSAsias He 	} /* segments in bio */
24085b9f66aSAsias He 
24185b9f66aSAsias He 	if (sg)
24285b9f66aSAsias He 		sg_mark_end(sg);
24385b9f66aSAsias He 
24485b9f66aSAsias He 	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
24585b9f66aSAsias He 	return nsegs;
24685b9f66aSAsias He }
24785b9f66aSAsias He EXPORT_SYMBOL(blk_bio_map_sg);
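
/*
 * Example usage (a minimal sketch; example_map_one_bio() is a
 * hypothetical helper): mapping a single bio, e.g. for a driver that
 * builds one command per bio rather than per request.  This assumes
 * bio->bi_phys_segments is valid, i.e. blk_recount_segments() has run.
 */
static inline int example_map_one_bio(struct request_queue *q,
				      struct bio *bio,
				      struct scatterlist *sgl)
{
	sg_init_table(sgl, bio->bi_phys_segments);
	return blk_bio_map_sg(q, bio, sgl);
}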

static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump the
	 * physical segment counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued request.
	 * We can't merge those.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}
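
/*
 * Worked example (illustrative): if req's last physical segment is
 * 1KiB, next's first physical segment is 3KiB, and
 * blk_phys_contig_segment() reports the two bios contiguous, the
 * merged request carries one 4KiB segment across the boundary:
 * total_phys_segments drops by one and, for single-segment requests,
 * the cached front/back sizes are updated to the combined 4KiB.
 */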

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			  struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->special)
		return 0;

	/*
	 * If we are allowed to merge, then append the bio list from next
	 * to rq and release next.  ll_merge_requests_fn() will have
	 * updated the segment counts; update the sector counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return false;

	return true;
}

int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
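
/*
 * Example (a minimal sketch; example_elv_try_merge() is a hypothetical
 * wrapper, mirroring how the elevator core combines these helpers):
 * first gate on blk_rq_merge_ok(), then classify the merge direction
 * with blk_try_merge().
 */
static inline int example_elv_try_merge(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return ELEVATOR_NO_MERGE;

	return blk_try_merge(rq, bio);
}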