/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

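/*
 * Walk every bio_vec in the bio chain starting at @bio and count the
 * number of physical segments it maps to, honouring the queue's
 * clustering, max-segment-size and segment-boundary limits.  The sizes
 * of the leading and trailing segments are cached in the first and last
 * bio (bi_seg_front_size/bi_seg_back_size) so later merge decisions can
 * tell whether two adjacent requests would fuse into an over-sized
 * segment.
 */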
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	struct bio_vec *bv, *bvprv = NULL;
	int cluster, i, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;

	if (!bio)
		return 0;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, i) {
			/*
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since that
			 * might change with the bounce page.
			 */
			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
			if (high || highprv)
				goto new_segment;
			if (cluster) {
				if (seg_size + bv->bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
					goto new_segment;

				seg_size += bv->bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv->bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

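/* Recompute and cache the physical segment count for a whole request. */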
void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

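/*
 * Recount the physical segments of a single bio.  The chain is detached
 * around the call so only @bio itself is counted, and the result is
 * marked valid via BIO_SEG_VALID.
 */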
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

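/*
 * Check whether the last segment of @bio and the first segment of @nxt
 * can be folded into one physical segment without violating the queue's
 * clustering, segment-size and segment-boundary limits.
 */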
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}

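/*
 * Map one bio_vec into the scatterlist: either extend the current sg
 * entry when the queue's clustering rules allow it, or start a new one.
 * State (previous bio_vec, current sg entry, segment count) is carried
 * across calls through the pointer arguments.
 */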
static void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec **bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*bvprv && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			(*sg)->page_link &= ~0x02;
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = bvec;
}

/*
 * map a request to scatterlist, return number of sg entries set up. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct req_iterator iter;
	struct scatterlist *sg;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	sg = NULL;
	rq_for_each_segment(bvec, rq, iter) {
		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
				     &nsegs, &cluster);
	} /* segments in rq */

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
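
/*
 * A minimal usage sketch (illustrative only -- "dev", "MY_MAX_SEGS" and
 * the surrounding driver context are hypothetical):
 *
 *	struct scatterlist sgl[MY_MAX_SEGS];
 *	int nsegs;
 *
 *	sg_init_table(sgl, MY_MAX_SEGS);
 *	nsegs = blk_rq_map_sg(q, rq, sgl);
 *	nsegs = dma_map_sg(dev, sgl, nsegs,
 *			   rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE :
 *						      DMA_FROM_DEVICE);
 *
 * The caller must size sgl for at least rq->nr_phys_segments entries.
 */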

/**
 * blk_bio_map_sg - map a bio to a scatterlist
 * @q: request_queue in question
 * @bio: bio being mapped
 * @sglist: scatterlist being mapped
 *
 * Note:
 *    Caller must make sure sg can hold bio->bi_phys_segments entries
 *
 * Will return the number of sg entries set up
 */
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
		   struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct scatterlist *sg;
	int nsegs, cluster;
	unsigned long i;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	bvprv = NULL;
	sg = NULL;
	bio_for_each_segment(bvec, bio, i) {
		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
				     &nsegs, &cluster);
	} /* segments in bio */

	if (sg)
		sg_mark_end(sg);

	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
	return nsegs;
}
EXPORT_SYMBOL(blk_bio_map_sg);

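/*
 * Account for @bio's physical segments when it joins @req.  Fails (and
 * disqualifies @req from further merging) if the combined segment count
 * would exceed the queue limit or the integrity payloads cannot be
 * merged.
 */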
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump the
	 * physical segment counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

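/*
 * ll_back_merge_fn and its mirror ll_front_merge_fn below decide whether
 * @bio may be appended to (resp. prepended to) @req without exceeding
 * the queue's sector limit, then delegate the segment accounting to
 * ll_new_hw_segment().
 */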
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	unsigned short max_sectors;

	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
		max_sectors = queue_max_hw_sectors(q);
	else
		max_sectors = queue_max_sectors(q);

	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	unsigned short max_sectors;

	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
		max_sectors = queue_max_hw_sectors(q);
	else
		max_sectors = queue_max_sectors(q);

	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

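/*
 * Decide whether two adjacent requests can be combined: neither may be a
 * re-queued (->special) request, the merged size must stay within the
 * queue's sector and segment limits, and integrity payloads must be
 * compatible.  If their boundary segments fuse, the straddling segment
 * is counted once and the cached front/back segment sizes are updated.
 */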
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check whether either of the requests has been re-queued;
	 * re-queued requests must not be merged.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

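/* Drop the in-flight accounting for a request that is being merged away. */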
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			  struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	/*
	 * Don't merge file system requests and discard requests
	 */
	if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
		return 0;

	/*
	 * Don't merge discard requests and secure discard requests
	 */
	if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->special)
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of the bios passes from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

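/*
 * Quick feasibility check: can @bio be merged into @rq at all?  The bio
 * must match the request's discard/secure-discard type, data direction,
 * disk, and integrity status.
 */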
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return false;

	/* don't merge file system requests and discard requests */
	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
		return false;

	/* don't merge discard requests and secure discard requests */
	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return false;

	return true;
}

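/*
 * Classify where @bio would attach to @rq.  For example, if @rq covers
 * sectors 100..107 (blk_rq_pos() == 100, blk_rq_sectors() == 8), a bio
 * starting at sector 108 yields a back merge, while an 8-sector bio
 * whose bi_sector is 92 yields a front merge.
 */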
int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}