/* xref: /openbmc/linux/block/blk-merge.c (revision 613b1488) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-throttle.h"

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	bio_get_first_bvec(bio, bv);
	if (bv->bv_len == bio->bi_iter.bi_size)
		return;		/* this bio only has a single bvec */

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records the actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

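/*
 * A worked example for bio_get_last_bvec(), with hypothetical sizes: a bio
 * over two 4 KiB bvecs whose bi_size was trimmed to 6 KiB ends 2 KiB into
 * the second bvec.  Advancing the iterator copy by bi_size leaves
 * iter.bi_bvec_done == 2048, so the second bvec is returned with bv_len
 * clamped to 2048.
 */
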
static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the 1st bio starts with a non-zero offset, otherwise
	 * it is quite difficult to respect the sg gap limit.  We work hard to
	 * merge a huge number of small single bios in the mkfs case.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends at an unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
}

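/*
 * Example of the first check above, with a hypothetical limit: for a
 * 4 KiB virt boundary, queue_virt_boundary(q) == 0xfff.  A first bvec at
 * bv_offset == 0x200 has low bits set, so bio_will_gap() returns true and
 * the merge is rejected before the last/first bvec pair is even examined.
 */
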
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', and it has to be aligned with the
 * logical block size, which is the minimum unit accepted by hardware.
 */
static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
{
	return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
}

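/*
 * Example, assuming a 4096-byte logical block size: round_down(UINT_MAX,
 * 4096) == 4294963200, so a bio is capped at 4294963200 >> 9 == 8388600
 * sectors while staying logical-block aligned.
 */
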
static struct bio *bio_split_discard(struct bio *bio,
				     const struct queue_limits *lim,
				     unsigned *nsegs, struct bio_set *bs)
{
	unsigned int max_discard_sectors, granularity;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(lim->discard_granularity >> 9, 1U);

	max_discard_sectors =
		min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	tmp = bio->bi_iter.bi_sector + split_sectors -
		((lim->discard_alignment >> 9) % granularity);
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

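/*
 * A worked example of the alignment math above, with hypothetical limits:
 * granularity == 8 sectors, discard_alignment == 0, bi_sector == 4 and
 * split_sectors == 65528.  tmp == (4 + 65528) % 8 == 4, so the split
 * shrinks to 65524 sectors and the remainder resumes at sector 65528, a
 * multiple of the granularity.
 */
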
static struct bio *bio_split_write_zeroes(struct bio *bio,
					  const struct queue_limits *lim,
					  unsigned *nsegs, struct bio_set *bs)
{
	*nsegs = 0;
	if (!lim->max_write_zeroes_sectors)
		return NULL;
	if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
		return NULL;
	return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
}

/*
 * Return the maximum number of sectors from the start of a bio that may be
 * submitted as a single request to a block device. If enough sectors remain,
 * align the end to the physical block size. Otherwise align the end to the
 * logical block size. This approach minimizes the number of non-aligned
 * requests that are submitted to a block device if the start of a bio is not
 * aligned to a physical block boundary.
 */
static inline unsigned get_max_io_size(struct bio *bio,
				       const struct queue_limits *lim)
{
	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
	unsigned max_sectors = lim->max_sectors, start, end;

	if (lim->chunk_sectors) {
		max_sectors = min(max_sectors,
			blk_chunk_sectors_left(bio->bi_iter.bi_sector,
					       lim->chunk_sectors));
	}

	start = bio->bi_iter.bi_sector & (pbs - 1);
	end = (start + max_sectors) & ~(pbs - 1);
	if (end > start)
		return end - start;
	return max_sectors & ~(lbs - 1);
}

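/*
 * A worked example with hypothetical limits: pbs == 8 (4 KiB physical
 * blocks), lbs == 1, max_sectors == 1280 and bi_sector == 3.  Then
 * start == 3 and end == (3 + 1280) & ~7 == 1280, so 1277 sectors are
 * allowed and the split ends exactly on a physical block boundary.
 */
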
/**
 * get_max_segment_size() - maximum number of bytes to add as a single segment
 * @lim: Request queue limits.
 * @start_page: See below.
 * @offset: Offset from @start_page where to add a segment.
 *
 * Returns the maximum number of bytes that can be added as a single segment.
 */
static inline unsigned get_max_segment_size(const struct queue_limits *lim,
		struct page *start_page, unsigned long offset)
{
	unsigned long mask = lim->seg_boundary_mask;

	offset = mask & (page_to_phys(start_page) + offset);

	/*
	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
	 * after having calculated the minimum.
	 */
	return min(mask - offset, (unsigned long)lim->max_segment_size - 1) + 1;
}

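/*
 * A worked example with hypothetical limits: seg_boundary_mask == 0xffff
 * and a segment starting at physical address 0x12345800 with
 * max_segment_size == 64 KiB.  offset becomes 0x5800 and the result is
 * min(0xffff - 0x5800, 0xffff) + 1 == 0xa800 bytes: exactly the room left
 * before the next 64 KiB boundary.
 */
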
/**
 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 * @lim:      [in] queue limits to split based on
 * @bv:       [in] bvec to examine
 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 *            by the number of segments from @bv that may be appended to that
 *            bio without exceeding @max_segs
 * @bytes:    [in,out] Number of bytes in the bio being built. Incremented
 *            by the number of bytes from @bv that may be appended to that
 *            bio without exceeding @max_bytes
 * @max_segs: [in] upper bound for *@nsegs
 * @max_bytes: [in] upper bound for *@bytes
 *
 * When splitting a bio, it can happen that a bvec is encountered that is too
 * big to fit in a single segment and hence that it has to be split in the
 * middle. This function verifies whether or not that should happen. The value
 * %true is returned if and only if appending the entire @bv to a bio with
 * *@nsegs segments and *@bytes bytes would make that bio unacceptable for
 * the block driver.
 */
static bool bvec_split_segs(const struct queue_limits *lim,
		const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
		unsigned max_segs, unsigned max_bytes)
{
	unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
	unsigned len = min(bv->bv_len, max_len);
	unsigned total_len = 0;
	unsigned seg_size = 0;

	while (len && *nsegs < max_segs) {
		seg_size = get_max_segment_size(lim, bv->bv_page,
						bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		(*nsegs)++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
			break;
	}

	*bytes += total_len;

	/* tell the caller to split the bvec if it is too big to fit */
	return len > 0 || bv->bv_len > max_len;
}

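/*
 * A worked example with hypothetical limits: a 192 KiB bvec with
 * max_segment_size == 64 KiB, no virt boundary and max_segs == 2.  Two
 * 64 KiB segments are accounted before the segment limit stops the loop
 * with len != 0, so %true is returned and the caller splits the bio.
 */
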
/**
 * bio_split_rw - split a bio in two bios
 * @bio:  [in] bio to be split
 * @lim:  [in] queue limits to split based on
 * @segs: [out] number of segments in the bio with the first half of the sectors
 * @bs:	  [in] bio set to allocate the clone from
 * @max_bytes: [in] maximum number of bytes per bio
 *
 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 * following is guaranteed for the cloned bio:
 * - That it has at most @max_bytes worth of data
 * - That it has at most @lim->max_segments segments.
 *
 * Except for discard requests the cloned bio will point at the bi_io_vec of
 * the original bio. It is the responsibility of the caller to ensure that the
 * original bio is not freed before the cloned bio. The caller is also
 * responsible for ensuring that @bs is only destroyed after processing of the
 * split bio has finished.
 */
static struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *segs, struct bio_set *bs, unsigned max_bytes)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, bytes = 0;

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
			goto split;

		if (nsegs < lim->max_segments &&
		    bytes + bv.bv_len <= max_bytes &&
		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			bytes += bv.bv_len;
		} else {
			if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
					lim->max_segments, max_bytes))
				goto split;
		}

		bvprv = bv;
		bvprvp = &bvprv;
	}

	*segs = nsegs;
	return NULL;
split:
	*segs = nsegs;

	/*
	 * Individual bvecs might not be logical block aligned. Round down the
	 * split size so that each bio is properly block size aligned, even if
	 * we do not use the full hardware limits.
	 */
	bytes = ALIGN_DOWN(bytes, lim->logical_block_size);

	/*
	 * Bio splitting may cause subtle trouble such as hangs when doing sync
	 * iopoll in the direct IO path. Given that the performance gain of
	 * iopoll for big IO can be trivial, disable iopoll when a split is
	 * needed.
	 */
	bio_clear_polled(bio);
	return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
}

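/*
 * A worked example of the rounding in bio_split_rw(), with hypothetical
 * numbers: if the accepted bvecs add up to bytes == 130560 and the logical
 * block size is 4096, ALIGN_DOWN() trims the split to 126976 bytes
 * (31 blocks), keeping both halves logical-block aligned.
 */
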
/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio:     bio to be split
 * @lim:     queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it.  @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @bio->bi_bdev->bd_disk->bio_split, which is
 * provided by the block layer.
 */
struct bio *__bio_split_to_limits(struct bio *bio,
				  const struct queue_limits *lim,
				  unsigned int *nr_segs)
{
	struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
	struct bio *split;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = bio_split_discard(bio, lim, nr_segs, bs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
		break;
	default:
		split = bio_split_rw(bio, lim, nr_segs, bs,
				get_max_io_size(bio, lim) << SECTOR_SHIFT);
		if (IS_ERR(split))
			return NULL;
		break;
	}

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		blkcg_bio_issue_init(split);
		bio_chain(split, bio);
		trace_block_split(split, bio->bi_iter.bi_sector);
		submit_bio_noacct(bio);
		return split;
	}
	return bio;
}

/**
 * bio_split_to_limits - split a bio to fit the queue limits
 * @bio:     bio to be split
 *
 * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
 * if so split off a bio fitting the limits from the beginning of @bio and
 * return it.  @bio is shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @bio->bi_bdev->bd_disk->bio_split, which is
 * provided by the block layer.
 */
struct bio *bio_split_to_limits(struct bio *bio)
{
	const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
	unsigned int nr_segs;

	if (bio_may_exceed_limits(bio, lim))
		return __bio_split_to_limits(bio, lim, &nr_segs);
	return bio;
}
EXPORT_SYMBOL(bio_split_to_limits);

unsigned int blk_recalc_rq_segments(struct request *rq)
{
	unsigned int nr_phys_segs = 0;
	unsigned int bytes = 0;
	struct req_iterator iter;
	struct bio_vec bv;

	if (!rq->bio)
		return 0;

	switch (bio_op(rq->bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		if (queue_max_discard_segments(rq->q) > 1) {
			struct bio *bio = rq->bio;

			for_each_bio(bio)
				nr_phys_segs++;
			return nr_phys_segs;
		}
		return 1;
	case REQ_OP_WRITE_ZEROES:
		return 0;
	default:
		break;
	}

	rq_for_each_bvec(bv, rq, iter)
		bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
				UINT_MAX, UINT_MAX);
	return nr_phys_segs;
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping. We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}

static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = min(get_max_segment_size(&q->limits,
				   bvec->bv_page, offset), nbytes);
		struct page *page = bvec->bv_page;

		/*
		 * Unfortunately a fair number of drivers barf on scatterlists
		 * that have an offset larger than PAGE_SIZE, despite other
		 * subsystems dealing with that invariant just fine.  For now
		 * stick to the legacy format where we never present those from
		 * the block layer, but the code below should be removed once
		 * these offenders (mostly MMC/SD drivers) are fixed.
		 */
		page += (offset >> PAGE_SHIFT);
		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
		nsegs++;
	}

	return nsegs;
}

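/*
 * Example of the offset fixup in blk_bvec_map_sg(), assuming
 * PAGE_SIZE == 4096: a bvec with bv_offset == 5120 is presented to the
 * driver as page + 1 with an in-page offset of 1024, so no scatterlist
 * entry carries an offset larger than PAGE_SIZE.
 */
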
static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
			   struct bio_vec *bvprv, struct scatterlist **sg)
{
	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

	if ((*sg)->length + nbytes > queue_max_segment_size(q))
		return false;

	if (!biovec_phys_mergeable(q, bvprv, bvec))
		return false;

	(*sg)->length += nbytes;

	return true;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

	for_each_bio(bio) {
		bio_for_each_bvec(bvec, bio, iter) {
			/*
			 * Only try to merge bvecs from two bios given we
			 * have done bio internal merge when adding pages
			 * to bio
			 */
			if (new_bio &&
			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
				goto next_bvec;

			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
			else
				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
			new_bio = false;
		}
		if (likely(bio->bi_iter.bi_size)) {
			bvprv = bvec;
			new_bio = true;
		}
	}

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg)
{
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

	if (*last_sg)
		sg_mark_end(*last_sg);

	/*
	 * Something must have gone wrong if the computed number of segments
	 * is bigger than the number of req's physical segments
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;
	unsigned int max_sectors;

	if (blk_rq_is_passthrough(rq))
		return q->limits.max_hw_sectors;

	max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return max_sectors;
	return min(max_sectors,
		   blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
}

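/*
 * Example of the chunk clamp above, with hypothetical limits: for
 * chunk_sectors == 256 and offset == 1000, at most 256 - (1000 & 255) ==
 * 24 sectors fit before the request would straddle a chunk boundary
 * (assuming blk_chunk_sectors_left() returns the room left in the current
 * chunk, as its name suggests).
 */
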
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
		unsigned int nr_phys_segs)
{
	if (!blk_cgroup_mergeable(req, bio))
		goto no_merge;

	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/* discard request merge won't add a new segment */
	if (req_op(req) == REQ_OP_DISCARD)
		return 1;

	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump the physical
	 * segment counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}

int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_back_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static int ll_front_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_front_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > blk_rq_get_max_segments(req))
		return 0;

	if (!blk_cgroup_mergeable(req, next->bio))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	if (!bio_crypt_ctx_merge_rq(req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge_request(struct request *req)
{
	if (blk_do_io_stat(req)) {
		part_stat_lock();
		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
		part_stat_unlock();
	}
}

static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next))
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge_request(next);

	trace_block_rq_merge(next);

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}

static struct request *attempt_back_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

static struct request *attempt_front_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

/*
 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
 * otherwise. The caller is responsible for freeing 'next' if the merge
 * happened.
 */
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			   struct request *next)
{
	return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* don't merge across cgroup boundaries */
	if (!blk_cgroup_mergeable(rq, bio))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* Only merge if the crypt contexts are compatible */
	if (!bio_crypt_rq_ctx_compatible(rq, bio))
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	return true;
}

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}

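/*
 * Example of the position checks in blk_try_merge(), with hypothetical
 * sectors: for a request at sector 100 spanning 8 sectors, a bio starting
 * at sector 108 is a back merge candidate, while an 8-sector bio starting
 * at sector 92 is a front merge candidate.
 */
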
static void blk_account_io_merge_bio(struct request *req)
{
	if (!blk_do_io_stat(req))
		return;

	part_stat_lock();
	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
	part_stat_unlock();
}

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

static enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_backmerge(bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_free_ctx(bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_front_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_frontmerge(bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_do_front_merge(req, bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
		struct request *req, struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	rq_qos_merge(q, req, bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->nr_phys_segments = segments + 1;

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
no_merge:
	req_set_nomerge(q, req);
	return BIO_MERGE_FAILED;
}

static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
						   struct request *rq,
						   struct bio *bio,
						   unsigned int nr_segs,
						   bool sched_allow_merge)
{
	if (!blk_rq_merge_ok(rq, bio))
		return BIO_MERGE_NONE;

	switch (blk_try_merge(rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_back_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_FRONT_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_front_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return BIO_MERGE_NONE;
	}

	return BIO_MERGE_FAILED;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @nr_segs: number of segments in @bio
 *
 * Determine whether @bio being queued on @q can be merged with the previous
 * request on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added to the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside the queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct blk_plug *plug;
	struct request *rq;

	plug = blk_mq_plug(bio);
	if (!plug || rq_list_empty(plug->mq_list))
		return false;

	rq_list_for_each(&plug->mq_list, rq) {
		if (rq->q == q) {
			if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
			    BIO_MERGE_OK)
				return true;
			break;
		}

		/*
		 * Only keep iterating the plug list for merges if we have
		 * multiple queues
		 */
		if (!plug->multiple_queues)
			break;
	}
	return false;
}

/*
 * Iterate the list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		if (!checked--)
			break;

		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
		case BIO_MERGE_NONE:
			continue;
		case BIO_MERGE_OK:
			return true;
		case BIO_MERGE_FAILED:
			return false;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_bio_list_merge);

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);