/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

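/*
 * Walk the bio list of a request and count the number of physical
 * segments it maps to, honouring the queue's clustering setting,
 * maximum segment size and segment boundary mask.  As a side effect,
 * the sizes of the first and last segment are cached in the first and
 * last bio so that later merge decisions can be made cheaply.
 */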
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	unsigned int phys_size;
	struct bio_vec *bv, *bvprv = NULL;
	int cluster, i, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;

	if (!bio)
		return 0;

	fbio = bio;
	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	seg_size = 0;
	phys_size = nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, i) {
			/*
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since that
			 * might change with the bounce page.
			 */
			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
			if (high || highprv)
				goto new_segment;
			if (cluster) {
				if (seg_size + bv->bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
					goto new_segment;

				seg_size += bv->bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv->bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

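/*
 * Recompute rq->nr_phys_segments after the bio chain of a request has
 * changed (e.g. after a merge or a partial completion).
 */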
void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

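/*
 * Recount the physical segments of a single bio.  bi_next is cleared
 * temporarily so that only this bio is walked, and BIO_SEG_VALID is
 * set to mark the cached count as up to date.
 */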
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

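/*
 * Return 1 if bio and nxt can be folded into a single physical
 * segment: clustering must be enabled, the combined size must stay
 * within the queue's segment size limit, and the two must be
 * physically contiguous without straddling a segment boundary.
 */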
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}

/*
 * map a request to scatterlist, return number of sg entries set up. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct req_iterator iter;
	struct scatterlist *sg;
	int nsegs, cluster;

	nsegs = 0;
	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	sg = NULL;
	rq_for_each_segment(bvec, rq, iter) {
		int nbytes = bvec->bv_len;

		if (bvprv && cluster) {
			if (sg->length + nbytes > queue_max_segment_size(q))
				goto new_segment;

			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
				goto new_segment;

			sg->length += nbytes;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				/*
				 * If the driver previously mapped a shorter
				 * list, we could see a termination bit
				 * prematurely unless it fully inits the sg
				 * table on each mapping. We KNOW that there
				 * must be more entries here or the driver
				 * would be buggy, so force clear the
				 * termination bit to avoid doing a full
				 * sg_init_table() in drivers for each command.
				 */
				sg->page_link &= ~0x02;
				sg = sg_next(sg);
			}

			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
			nsegs++;
		}
		bvprv = bvec;
	} /* segments in rq */

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_RW)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
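/*
 * Illustrative use only (not from this file): a driver would typically
 * map the request and then hand the list to the DMA API, e.g.
 *
 *	nents = blk_rq_map_sg(q, rq, sgtab);
 *	nents = dma_map_sg(dev, sgtab, nents, dir);
 *	for_each_sg(sgtab, sg, nents, i)
 *		setup_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *
 * where setup_hw_descriptor() stands in for whatever the hardware
 * needs, and 'sgtab' must hold at least rq->nr_phys_segments entries.
 */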
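
/*
 * Check whether adding bio's segments to req would exceed the queue's
 * physical or hardware segment limits.  If it would, flag the request
 * REQ_NOMERGE so we don't keep retrying the merge; otherwise account
 * the new segments and allow the merge.
 */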
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
	    req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}

	/*
	 * This will form the start of a new hw segment.  Bump the
	 * physical segment counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;
}

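/*
 * Can bio be appended to the tail of req (a back merge)?  Checks the
 * sector limit for the request type and makes sure the cached segment
 * counts of the bios involved are valid before doing the segment
 * accounting.
 */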
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	unsigned short max_sectors;

	if (unlikely(blk_pc_request(req)))
		max_sectors = queue_max_hw_sectors(q);
	else
		max_sectors = queue_max_sectors(q);

	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

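/*
 * Can bio be prepended to the head of req (a front merge)?  Mirrors
 * ll_back_merge_fn() but validates the segment cache of req's first
 * bio instead of its last.
 */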
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	unsigned short max_sectors;

	if (unlikely(blk_pc_request(req)))
		max_sectors = queue_max_hw_sectors(q);
	else
		max_sectors = queue_max_sectors(q);

	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

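/*
 * Can two whole requests be merged back to back?  Rejects re-queued
 * requests and merges that would exceed the queue's size or segment
 * limits; if the boundary bios collapse into one physical segment,
 * the cached segment sizes and the total count are adjusted.
 */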
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued
	 * request.  Can't merge them if they are.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_phys_segments(q))
		return 0;

	if (total_phys_segments > queue_max_hw_segments(q))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

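/*
 * Update the per-partition accounting when a request disappears in a
 * merge: close out the round statistics and drop the in-flight count
 * for the partition the request was on.
 */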
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));

		part_round_stats(cpu, part);
		part_dec_in_flight(part);

		part_stat_unlock();
	}
}

/*
 * Try to merge 'next' into 'req'.  The two must be mergeable,
 * contiguous on disk, and share direction, disk and failfast
 * attributes.  On success the bios of 'next' are moved over to 'req'
 * and 'next' is freed.
 *
 * Has to be called with the request spinlock acquired.
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			  struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->special)
		return 0;

	if (blk_integrity_rq(req) != blk_integrity_rq(next))
		return 0;

	/* don't merge requests of different failfast settings */
	if (blk_failfast_dev(req)	!= blk_failfast_dev(next)	||
	    blk_failfast_transport(req)	!= blk_failfast_transport(next)	||
	    blk_failfast_driver(req)	!= blk_failfast_driver(next))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of the bio list passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

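/*
 * Ask the elevator for the request that follows rq on disk and try to
 * merge the two.
 */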
int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

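/*
 * Ask the elevator for the request that precedes rq on disk and try to
 * merge rq into it.
 */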
int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}
461