/* /openbmc/linux/block/blk-merge.c (revision 4f024f37) */
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

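/*
 * Walk all bios in the chain and count the physical segments they will
 * occupy.  With clustering enabled, adjacent biovecs that are physically
 * contiguous and fit within the queue's segment size and boundary limits
 * are coalesced into one segment.  The sizes of the first and last
 * segments are cached in bi_seg_front_size/bi_seg_back_size for later
 * request-merge decisions.
 */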
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	struct bio_vec *bv, *bvprv = NULL;
	int cluster, i, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;

	if (!bio)
		return 0;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, i) {
			/*
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since that
			 * might change with the bounce page.
			 */
			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
			if (high || highprv)
				goto new_segment;
			if (cluster) {
				if (seg_size + bv->bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
					goto new_segment;

				seg_size += bv->bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv->bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

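/*
 * Recompute the segment count of a single bio.  bi_next is temporarily
 * cleared so that only this bio (and not the rest of the chain) is
 * counted, and BIO_SEG_VALID marks the cached count as current.
 */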
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

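/*
 * Return 1 if the last segment of @bio and the first segment of @nxt
 * can be folded into a single physical segment: they must be physically
 * contiguous and their combined size must respect the queue's segment
 * size and boundary limits.
 */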
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}

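/*
 * Map a single biovec into the scatterlist: either grow the current sg
 * entry when clustering allows it, or start a new one.  State is carried
 * across calls via *bvprv, *sg and *nsegs so that blk_rq_map_sg() and
 * blk_bio_map_sg() can share this helper.
 */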
static void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec **bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*bvprv && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = bvec;
}

/*
 * Map a request to a scatterlist; returns the number of sg entries set up.
 * The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct req_iterator iter;
	struct scatterlist *sg;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	sg = NULL;
	rq_for_each_segment(bvec, rq, iter) {
		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
				     &nsegs, &cluster);
	} /* segments in rq */

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
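
/*
 * Typical driver usage is a sketch like the following (illustrative
 * names, not taken from this file):
 *
 *	struct scatterlist sgl[NR_SEGS];	// NR_SEGS >= rq->nr_phys_segments
 *	int nents;
 *
 *	sg_init_table(sgl, NR_SEGS);
 *	nents = blk_rq_map_sg(q, rq, sgl);
 *	// hand the first 'nents' entries to dma_map_sg() and the hardware
 */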

/**
 * blk_bio_map_sg - map a bio to a scatterlist
 * @q: request_queue in question
 * @bio: bio being mapped
 * @sglist: scatterlist being mapped
 *
 * Note:
 *    Caller must make sure sg can hold bio->bi_phys_segments entries
 *
 * Will return the number of sg entries set up
 */
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
		   struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct scatterlist *sg;
	int nsegs, cluster;
	unsigned long i;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	bvprv = NULL;
	sg = NULL;
	bio_for_each_segment(bvec, bio, i) {
		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
				     &nsegs, &cluster);
	} /* segments in bio */

	if (sg)
		sg_mark_end(sg);

	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
	return nsegs;
}
EXPORT_SYMBOL(blk_bio_map_sg);

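/*
 * Account @bio's physical segments to @req if the merged request stays
 * within queue_max_segments() and the integrity payload still merges;
 * otherwise mark the request REQ_NOMERGE and fail the merge.
 */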
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump the
	 * physical segment counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

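/*
 * ll_back_merge_fn() and ll_front_merge_fn() decide whether @bio may be
 * appended to or prepended to @req: the combined size must not exceed
 * the request's sector limit, and the new segment count must still fit.
 */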
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload;
 * it does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

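/*
 * Decide whether @req and @next can be merged back to back: neither may
 * carry a special payload (see req_no_special_merge()), and the combined
 * sectors, physical segments and integrity payloads must all stay within
 * the queue's limits.
 */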
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check whether either of the requests is a re-queued
	 * request.  Can't merge them if they are.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

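/*
 * Remove the merged-away request from the per-partition in-flight
 * accounting; its I/O is now carried by the surviving request.
 */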
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts; update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

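/*
 * Fast mergeability check run before attempting a bio-into-request
 * merge: merge flags, data direction, device, integrity and WRITE SAME
 * constraints must all agree.
 */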
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}

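/*
 * Classify a possible merge: a bio starting exactly where @rq ends is a
 * back merge candidate, one ending exactly where @rq starts is a front
 * merge candidate, and anything else cannot be merged.
 */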
int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}