xref: /openbmc/linux/block/blk-merge.c (revision 613b1488)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Functions related to segment and merge handling
4  */
5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/bio.h>
8 #include <linux/blkdev.h>
9 #include <linux/blk-integrity.h>
10 #include <linux/scatterlist.h>
11 #include <linux/part_stat.h>
12 #include <linux/blk-cgroup.h>
13 
14 #include <trace/events/block.h>
15 
16 #include "blk.h"
17 #include "blk-mq-sched.h"
18 #include "blk-rq-qos.h"
19 #include "blk-throttle.h"
20 
21 static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
22 {
23 	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
24 }
25 
26 static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
27 {
28 	struct bvec_iter iter = bio->bi_iter;
29 	int idx;
30 
31 	bio_get_first_bvec(bio, bv);
32 	if (bv->bv_len == bio->bi_iter.bi_size)
33 		return;		/* this bio only has a single bvec */
34 
35 	bio_advance_iter(bio, &iter, iter.bi_size);
36 
37 	if (!iter.bi_bvec_done)
38 		idx = iter.bi_idx - 1;
39 	else	/* in the middle of bvec */
40 		idx = iter.bi_idx;
41 
42 	*bv = bio->bi_io_vec[idx];
43 
44 	/*
45 	 * iter.bi_bvec_done records the actual length of the last bvec
46 	 * if this bio ends in the middle of an io vector
47 	 */
48 	if (iter.bi_bvec_done)
49 		bv->bv_len = iter.bi_bvec_done;
50 }
51 
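/*
 * Editor's sketch (not part of the kernel source): a standalone userspace
 * program mirroring the arithmetic above. A bio may end in the middle of a
 * bvec; iter.bi_bvec_done then holds how many bytes of that bvec are
 * actually covered. All names and values below are made up for illustration.
 */
#include <stdio.h>

struct vec { unsigned len; };

int main(void)
{
	struct vec v[] = { {4096}, {4096}, {4096} };
	unsigned bi_size = 9000;	/* bio covers 9000 of 12288 bytes */
	unsigned idx = 0, done = bi_size;

	while (done && done >= v[idx].len)	/* advance over whole bvecs */
		done -= v[idx++].len;

	/* done != 0: the bio ends mid-bvec with 'done' bytes used */
	unsigned last = done ? idx : idx - 1;
	unsigned last_len = done ? done : v[last].len;

	printf("last bvec %u, len %u\n", last, last_len);	/* 2, 808 */
	return 0;
}
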
52 static inline bool bio_will_gap(struct request_queue *q,
53 		struct request *prev_rq, struct bio *prev, struct bio *next)
54 {
55 	struct bio_vec pb, nb;
56 
57 	if (!bio_has_data(prev) || !queue_virt_boundary(q))
58 		return false;
59 
60 	/*
61 	 * Don't merge if the first bio starts at a non-zero offset, otherwise
62 	 * it is quite difficult to respect the sg gap limit.  We work hard to
63 	 * merge a huge number of small single-page bios in the mkfs case.
64 	 */
65 	if (prev_rq)
66 		bio_get_first_bvec(prev_rq->bio, &pb);
67 	else
68 		bio_get_first_bvec(prev, &pb);
69 	if (pb.bv_offset & queue_virt_boundary(q))
70 		return true;
71 
72 	/*
73 	 * We don't need to worry about the situation where the merged segment
74 	 * ends at an unaligned virt boundary:
75 	 *
76 	 * - if 'pb' ends aligned, the merged segment ends aligned
77 	 * - if 'pb' ends unaligned, the next bio must be contiguous
78 	 *   with 'pb' in its first bvec 'nb', otherwise 'nb' can't
79 	 *   merge with 'pb'
80 	 */
81 	bio_get_last_bvec(prev, &pb);
82 	bio_get_first_bvec(next, &nb);
83 	if (biovec_phys_mergeable(q, &pb, &nb))
84 		return false;
85 	return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
86 }
87 
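/*
 * Editor's sketch (not part of the kernel source): the virt_boundary rule
 * that bio_will_gap() relies on, as a standalone userspace program. Two
 * segments may share one request only if the first ends on the boundary and
 * the second starts on it; this mirrors my reading of __bvec_gap_to_prev().
 */
#include <stdbool.h>
#include <stdio.h>

static bool gap_to_prev(unsigned long virt_boundary_mask,
			unsigned prev_offset, unsigned prev_len,
			unsigned next_offset)
{
	return (next_offset & virt_boundary_mask) ||
	       ((prev_offset + prev_len) & virt_boundary_mask);
}

int main(void)
{
	unsigned long mask = 0xfff;	/* 4 KiB virt boundary, e.g. NVMe */

	printf("%d\n", gap_to_prev(mask, 0, 4096, 0));	 /* 0: no gap */
	printf("%d\n", gap_to_prev(mask, 0, 2048, 0));	 /* 1: prev ends short */
	printf("%d\n", gap_to_prev(mask, 0, 4096, 512)); /* 1: next unaligned */
	return 0;
}
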
88 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
89 {
90 	return bio_will_gap(req->q, req, req->biotail, bio);
91 }
92 
93 static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
94 {
95 	return bio_will_gap(req->q, NULL, bio, req->bio);
96 }
97 
98 /*
99  * The max size one bio can handle is UINT_MAX, because bvec_iter.bi_size
100  * is defined as 'unsigned int'; it also has to be aligned to the
101  * logical block size, which is the minimum unit accepted by the hardware.
102  */
103 static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
104 {
105 	return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
106 }
107 
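/*
 * Editor's sketch (not part of the kernel source): the round_down()
 * arithmetic above, spelled out as a standalone userspace program.
 */
#include <stdio.h>

int main(void)
{
	unsigned lbs = 4096;	/* logical block size in bytes */
	unsigned max = (0xffffffffU / lbs) * lbs; /* round_down(UINT_MAX, lbs) */

	printf("%u sectors\n", max >> 9);	/* 8388600 */
	return 0;
}
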
108 static struct bio *bio_split_discard(struct bio *bio,
109 				     const struct queue_limits *lim,
110 				     unsigned *nsegs, struct bio_set *bs)
111 {
112 	unsigned int max_discard_sectors, granularity;
113 	sector_t tmp;
114 	unsigned split_sectors;
115 
116 	*nsegs = 1;
117 
118 	/* Zero-sector (unknown) and one-sector granularities are the same.  */
119 	granularity = max(lim->discard_granularity >> 9, 1U);
120 
121 	max_discard_sectors =
122 		min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
123 	max_discard_sectors -= max_discard_sectors % granularity;
124 
125 	if (unlikely(!max_discard_sectors)) {
126 		/* XXX: warn */
127 		return NULL;
128 	}
129 
130 	if (bio_sectors(bio) <= max_discard_sectors)
131 		return NULL;
132 
133 	split_sectors = max_discard_sectors;
134 
135 	/*
136 	 * If the next starting sector would be misaligned, stop the discard at
137 	 * the previous aligned sector.
138 	 */
139 	tmp = bio->bi_iter.bi_sector + split_sectors -
140 		((lim->discard_alignment >> 9) % granularity);
141 	tmp = sector_div(tmp, granularity);
142 
143 	if (split_sectors > tmp)
144 		split_sectors -= tmp;
145 
146 	return bio_split(bio, split_sectors, GFP_NOIO, bs);
147 }
148 
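/*
 * Editor's sketch (not part of the kernel source): the granularity
 * alignment math above as a standalone userspace program; '%' stands in
 * for sector_div(), which likewise returns the remainder.
 */
#include <stdio.h>

typedef unsigned long long sector_t;

/* Trim 'split' so the *next* discard starts on a granule boundary. */
static unsigned trim_to_granularity(sector_t start, unsigned split,
				    unsigned granularity, unsigned align)
{
	sector_t end = start + split - (align % granularity);
	unsigned rem = (unsigned)(end % granularity);

	if (split > rem)
		split -= rem;
	return split;
}

int main(void)
{
	/* 1 MiB granules (2048 sectors), no discard_alignment shift */
	printf("%u\n", trim_to_granularity(100, 4096, 2048, 0)); /* 3996 */
	return 0;
}
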
149 static struct bio *bio_split_write_zeroes(struct bio *bio,
150 					  const struct queue_limits *lim,
151 					  unsigned *nsegs, struct bio_set *bs)
152 {
153 	*nsegs = 0;
154 	if (!lim->max_write_zeroes_sectors)
155 		return NULL;
156 	if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
157 		return NULL;
158 	return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
159 }
160 
161 /*
162  * Return the maximum number of sectors from the start of a bio that may be
163  * submitted as a single request to a block device. If enough sectors remain,
164  * align the end to the physical block size. Otherwise align the end to the
165  * logical block size. This approach minimizes the number of non-aligned
166  * requests that are submitted to a block device if the start of a bio is not
167  * aligned to a physical block boundary.
168  */
169 static inline unsigned get_max_io_size(struct bio *bio,
170 				       const struct queue_limits *lim)
171 {
172 	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
173 	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
174 	unsigned max_sectors = lim->max_sectors, start, end;
175 
176 	if (lim->chunk_sectors) {
177 		max_sectors = min(max_sectors,
178 			blk_chunk_sectors_left(bio->bi_iter.bi_sector,
179 					       lim->chunk_sectors));
180 	}
181 
182 	start = bio->bi_iter.bi_sector & (pbs - 1);
183 	end = (start + max_sectors) & ~(pbs - 1);
184 	if (end > start)
185 		return end - start;
186 	return max_sectors & ~(lbs - 1);
187 }
188 
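/*
 * Editor's sketch (not part of the kernel source): the end-alignment
 * arithmetic above as a standalone userspace program. pbs and lbs are in
 * sectors and assumed to be powers of two, as in the kernel.
 */
#include <stdio.h>

static unsigned max_io_sectors(unsigned long long sector, unsigned max_sectors,
			       unsigned pbs, unsigned lbs)
{
	unsigned start = sector & (pbs - 1);
	unsigned end = (start + max_sectors) & ~(pbs - 1);

	if (end > start)
		return end - start;
	return max_sectors & ~(lbs - 1);
}

int main(void)
{
	/* 4 KiB physical (8 sectors), 512 B logical (1 sector) */
	printf("%u\n", max_io_sectors(3, 255, 8, 1)); /* 253: ends 4k-aligned */
	return 0;
}
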
189 /**
190  * get_max_segment_size() - maximum number of bytes to add as a single segment
191  * @lim: Request queue limits.
192  * @start_page: Page from which the segment starts.
193  * @offset: Offset from @start_page where to add a segment.
194  *
195  * Returns the maximum number of bytes that can be added as a single segment.
196  */
197 static inline unsigned get_max_segment_size(const struct queue_limits *lim,
198 		struct page *start_page, unsigned long offset)
199 {
200 	unsigned long mask = lim->seg_boundary_mask;
201 
202 	offset = mask & (page_to_phys(start_page) + offset);
203 
204 	/*
205 	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
206 	 * after having calculated the minimum.
207 	 */
208 	return min(mask - offset, (unsigned long)lim->max_segment_size - 1) + 1;
209 }
210 
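/*
 * Editor's sketch (not part of the kernel source): why the "min() then + 1"
 * order above matters. Both limits are first expressed as an inclusive last
 * byte so that neither computation can overflow, even when
 * mask == ULONG_MAX and offset == 0. Standalone userspace program.
 */
#include <stdio.h>

static unsigned long max_seg_bytes(unsigned long seg_boundary_mask,
				   unsigned long phys_addr,
				   unsigned long max_segment_size)
{
	unsigned long offset = phys_addr & seg_boundary_mask;
	unsigned long a = seg_boundary_mask - offset;	/* last byte before boundary */
	unsigned long b = max_segment_size - 1;		/* last byte of max segment */

	return (a < b ? a : b) + 1;
}

int main(void)
{
	/* no segment boundary at all, 64 KiB max segment size */
	printf("%lu\n", max_seg_bytes(~0UL, 0x1000, 0x10000)); /* 65536 */
	return 0;
}
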
211 /**
212  * bvec_split_segs - verify whether or not a bvec should be split in the middle
213  * @lim:      [in] queue limits to split based on
214  * @bv:       [in] bvec to examine
215  * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
216  *            by the number of segments from @bv that may be appended to that
217  *            bio without exceeding @max_segs
218  * @bytes:    [in,out] Number of bytes in the bio being built. Incremented
219  *            by the number of bytes from @bv that may be appended to that
220  *            bio without exceeding @max_bytes
221  * @max_segs: [in] upper bound for *@nsegs
222  * @max_bytes: [in] upper bound for *@bytes
223  *
224  * When splitting a bio, it can happen that a bvec is encountered that is too
225  * big to fit in a single segment and hence that it has to be split in the
226  * middle. This function verifies whether or not that should happen. The value
227  * %true is returned if and only if appending the entire @bv to a bio with
228  * *@nsegs segments and *@bytes bytes would make that bio unacceptable for
229  * the block driver.
230  */
231 static bool bvec_split_segs(const struct queue_limits *lim,
232 		const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
233 		unsigned max_segs, unsigned max_bytes)
234 {
235 	unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
236 	unsigned len = min(bv->bv_len, max_len);
237 	unsigned total_len = 0;
238 	unsigned seg_size = 0;
239 
240 	while (len && *nsegs < max_segs) {
241 		seg_size = get_max_segment_size(lim, bv->bv_page,
242 						bv->bv_offset + total_len);
243 		seg_size = min(seg_size, len);
244 
245 		(*nsegs)++;
246 		total_len += seg_size;
247 		len -= seg_size;
248 
249 		if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
250 			break;
251 	}
252 
253 	*bytes += total_len;
254 
255 	/* tell the caller to split the bvec if it is too big to fit */
256 	return len > 0 || bv->bv_len > max_len;
257 }
258 
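/*
 * Editor's sketch (not part of the kernel source): counting how many
 * hardware segments one large physically-contiguous bvec needs when a
 * segment may not cross 'boundary', mirroring the loop above. Standalone
 * userspace program; all values are made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned long boundary = 0x10000;	/* 64 KiB segment boundary */
	unsigned long phys = 0xfc00;		/* physical start of the bvec */
	unsigned long len = 0x20000;		/* 128 KiB */
	unsigned nsegs = 0;

	while (len) {
		unsigned long seg = boundary - (phys & (boundary - 1));

		if (seg > len)
			seg = len;
		phys += seg;
		len -= seg;
		nsegs++;
	}
	printf("%u segments\n", nsegs);	/* 3: 1 KiB + 64 KiB + 63 KiB */
	return 0;
}
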
259 /**
260  * bio_split_rw - split a bio in two bios
261  * @bio:  [in] bio to be split
262  * @lim:  [in] queue limits to split based on
263  * @segs: [out] number of segments in the bio with the first half of the sectors
264  * @bs:	  [in] bio set to allocate the clone from
265  * @max_bytes: [in] maximum number of bytes per bio
266  *
267  * Clone @bio, update the bi_iter of the clone to represent the first sectors
268  * of @bio and update @bio->bi_iter to represent the remaining sectors. The
269  * following is guaranteed for the cloned bio:
270  * - That it has at most @max_bytes worth of data
271  * - That it has at most queue_max_segments(@q) segments.
272  *
273  * Except for discard requests the cloned bio will point at the bi_io_vec of
274  * the original bio. It is the responsibility of the caller to ensure that the
275  * original bio is not freed before the cloned bio. The caller is also
276  * responsible for ensuring that @bs is only destroyed after processing of the
277  * split bio has finished.
278  */
279 static struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
280 		unsigned *segs, struct bio_set *bs, unsigned max_bytes)
281 {
282 	struct bio_vec bv, bvprv, *bvprvp = NULL;
283 	struct bvec_iter iter;
284 	unsigned nsegs = 0, bytes = 0;
285 
286 	bio_for_each_bvec(bv, bio, iter) {
287 		/*
288 		 * If the queue doesn't support SG gaps and adding this
289 		 * offset would create a gap, disallow it.
290 		 */
291 		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
292 			goto split;
293 
294 		if (nsegs < lim->max_segments &&
295 		    bytes + bv.bv_len <= max_bytes &&
296 		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
297 			nsegs++;
298 			bytes += bv.bv_len;
299 		} else {
300 			if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
301 					lim->max_segments, max_bytes))
302 				goto split;
303 		}
304 
305 		bvprv = bv;
306 		bvprvp = &bvprv;
307 	}
308 
309 	*segs = nsegs;
310 	return NULL;
311 split:
312 	*segs = nsegs;
313 
314 	/*
315 	 * Individual bvecs might not be logical block aligned. Round down the
316 	 * split size so that each bio is properly block size aligned, even if
317 	 * we do not use the full hardware limits.
318 	 */
319 	bytes = ALIGN_DOWN(bytes, lim->logical_block_size);
320 
321 	/*
322 	 * Bio splitting may cause subtle trouble such as a hang when doing
323 	 * sync iopoll in the direct IO path. Since the performance gain of
324 	 * iopoll for big IO is trivial, disable iopoll when a split is needed.
325 	 */
326 	bio_clear_polled(bio);
327 	return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
328 }
329 
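/*
 * Editor's sketch (not part of the kernel source): the ALIGN_DOWN()
 * rounding above, for power-of-two alignments, as a standalone userspace
 * program. The macro here is a local stand-in for the kernel's.
 */
#include <stdio.h>

#define ALIGN_DOWN(x, a) ((x) & ~((unsigned)(a) - 1))	/* 'a': power of two */

int main(void)
{
	unsigned bytes = 10000, lbs = 4096;

	/* both halves of the split stay logical-block aligned */
	printf("%u\n", ALIGN_DOWN(bytes, lbs));	/* 8192 */
	return 0;
}
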
330 /**
331  * __bio_split_to_limits - split a bio to fit the queue limits
332  * @bio:     bio to be split
333  * @lim:     queue limits to split based on
334  * @nr_segs: returns the number of segments in the returned bio
335  *
336  * Check if @bio needs splitting based on the queue limits, and if so split off
337  * a bio fitting the limits from the beginning of @bio and return it.  @bio is
338  * shortened to the remainder and re-submitted.
339  *
340  * The split bio is allocated from the disk's bio_split bio set, which is
341  * provided by the block layer.
342  */
343 struct bio *__bio_split_to_limits(struct bio *bio,
344 				  const struct queue_limits *lim,
345 				  unsigned int *nr_segs)
346 {
347 	struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
348 	struct bio *split;
349 
350 	switch (bio_op(bio)) {
351 	case REQ_OP_DISCARD:
352 	case REQ_OP_SECURE_ERASE:
353 		split = bio_split_discard(bio, lim, nr_segs, bs);
354 		break;
355 	case REQ_OP_WRITE_ZEROES:
356 		split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
357 		break;
358 	default:
359 		split = bio_split_rw(bio, lim, nr_segs, bs,
360 				get_max_io_size(bio, lim) << SECTOR_SHIFT);
361 		if (IS_ERR(split))
362 			return NULL;
363 		break;
364 	}
365 
366 	if (split) {
367 		/* there is no chance to merge the split bio */
368 		split->bi_opf |= REQ_NOMERGE;
369 
370 		blkcg_bio_issue_init(split);
371 		bio_chain(split, bio);
372 		trace_block_split(split, bio->bi_iter.bi_sector);
373 		submit_bio_noacct(bio);
374 		return split;
375 	}
376 	return bio;
377 }
378 
379 /**
380  * bio_split_to_limits - split a bio to fit the queue limits
381  * @bio:     bio to be split
382  *
383  * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
384  * if so split off a bio fitting the limits from the beginning of @bio and
385  * return it.  @bio is shortened to the remainder and re-submitted.
386  *
387  * The split bio is allocated from the disk's bio_split bio set, which is
388  * provided by the block layer.
389  */
390 struct bio *bio_split_to_limits(struct bio *bio)
391 {
392 	const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
393 	unsigned int nr_segs;
394 
395 	if (bio_may_exceed_limits(bio, lim))
396 		return __bio_split_to_limits(bio, lim, &nr_segs);
397 	return bio;
398 }
399 EXPORT_SYMBOL(bio_split_to_limits);
400 
401 unsigned int blk_recalc_rq_segments(struct request *rq)
402 {
403 	unsigned int nr_phys_segs = 0;
404 	unsigned int bytes = 0;
405 	struct req_iterator iter;
406 	struct bio_vec bv;
407 
408 	if (!rq->bio)
409 		return 0;
410 
411 	switch (bio_op(rq->bio)) {
412 	case REQ_OP_DISCARD:
413 	case REQ_OP_SECURE_ERASE:
414 		if (queue_max_discard_segments(rq->q) > 1) {
415 			struct bio *bio = rq->bio;
416 
417 			for_each_bio(bio)
418 				nr_phys_segs++;
419 			return nr_phys_segs;
420 		}
421 		return 1;
422 	case REQ_OP_WRITE_ZEROES:
423 		return 0;
424 	default:
425 		break;
426 	}
427 
428 	rq_for_each_bvec(bv, rq, iter)
429 		bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
430 				UINT_MAX, UINT_MAX);
431 	return nr_phys_segs;
432 }
433 
434 static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
435 		struct scatterlist *sglist)
436 {
437 	if (!*sg)
438 		return sglist;
439 
440 	/*
441 	 * If the driver previously mapped a shorter list, we could see a
442 	 * termination bit prematurely unless it fully inits the sg table
443 	 * on each mapping. We KNOW that there must be more entries here
444 	 * or the driver would be buggy, so force clear the termination bit
445 	 * to avoid doing a full sg_init_table() in drivers for each command.
446 	 */
447 	sg_unmark_end(*sg);
448 	return sg_next(*sg);
449 }
450 
451 static unsigned blk_bvec_map_sg(struct request_queue *q,
452 		struct bio_vec *bvec, struct scatterlist *sglist,
453 		struct scatterlist **sg)
454 {
455 	unsigned nbytes = bvec->bv_len;
456 	unsigned nsegs = 0, total = 0;
457 
458 	while (nbytes > 0) {
459 		unsigned offset = bvec->bv_offset + total;
460 		unsigned len = min(get_max_segment_size(&q->limits,
461 				   bvec->bv_page, offset), nbytes);
462 		struct page *page = bvec->bv_page;
463 
464 		/*
465 		 * Unfortunately a fair number of drivers barf on scatterlists
466 		 * that have an offset larger than PAGE_SIZE, despite other
467 		 * subsystems dealing with that invariant just fine.  For now
468 		 * stick to the legacy format where we never present those from
469 		 * the block layer, but the code below should be removed once
470 		 * these offenders (mostly MMC/SD drivers) are fixed.
471 		 */
472 		page += (offset >> PAGE_SHIFT);
473 		offset &= ~PAGE_MASK;
474 
475 		*sg = blk_next_sg(sg, sglist);
476 		sg_set_page(*sg, page, len, offset);
477 
478 		total += len;
479 		nbytes -= len;
480 		nsegs++;
481 	}
482 
483 	return nsegs;
484 }
485 
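/*
 * Editor's sketch (not part of the kernel source): folding a multi-page
 * bvec offset into a page index plus an in-page offset, as done before
 * sg_set_page() above. Standalone userspace program with local macros.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long offset = 9000;
	unsigned long page_idx = offset >> PAGE_SHIFT;	/* 2 pages in */
	unsigned long in_page = offset & ~PAGE_MASK;	/* 808 bytes in */

	printf("page +%lu, offset %lu\n", page_idx, in_page);
	return 0;
}
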
486 static inline int __blk_bvec_map_sg(struct bio_vec bv,
487 		struct scatterlist *sglist, struct scatterlist **sg)
488 {
489 	*sg = blk_next_sg(sg, sglist);
490 	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
491 	return 1;
492 }
493 
494 /* only try to merge bvecs into one sg if they are from two bios */
495 static inline bool
496 __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
497 			   struct bio_vec *bvprv, struct scatterlist **sg)
498 {
499 
500 	int nbytes = bvec->bv_len;
501 
502 	if (!*sg)
503 		return false;
504 
505 	if ((*sg)->length + nbytes > queue_max_segment_size(q))
506 		return false;
507 
508 	if (!biovec_phys_mergeable(q, bvprv, bvec))
509 		return false;
510 
511 	(*sg)->length += nbytes;
512 
513 	return true;
514 }
515 
516 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
517 			     struct scatterlist *sglist,
518 			     struct scatterlist **sg)
519 {
520 	struct bio_vec bvec, bvprv = { NULL };
521 	struct bvec_iter iter;
522 	int nsegs = 0;
523 	bool new_bio = false;
524 
525 	for_each_bio(bio) {
526 		bio_for_each_bvec(bvec, bio, iter) {
527 			/*
528 			 * Only try to merge bvecs from two different bios,
529 			 * since merging within a bio has already been done
530 			 * when adding pages to the bio
531 			 */
532 			if (new_bio &&
533 			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
534 				goto next_bvec;
535 
536 			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
537 				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
538 			else
539 				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
540  next_bvec:
541 			new_bio = false;
542 		}
543 		if (likely(bio->bi_iter.bi_size)) {
544 			bvprv = bvec;
545 			new_bio = true;
546 		}
547 	}
548 
549 	return nsegs;
550 }
551 
552 /*
553  * map a request to scatterlist, return number of sg entries set up. Caller
554  * must make sure sg can hold rq->nr_phys_segments entries
555  */
556 int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
557 		struct scatterlist *sglist, struct scatterlist **last_sg)
558 {
559 	int nsegs = 0;
560 
561 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
562 		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
563 	else if (rq->bio)
564 		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
565 
566 	if (*last_sg)
567 		sg_mark_end(*last_sg);
568 
569 	/*
570 	 * Something must be wrong if the computed number of segments
571 	 * is bigger than the number of the request's physical segments
572 	 */
573 	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
574 
575 	return nsegs;
576 }
577 EXPORT_SYMBOL(__blk_rq_map_sg);
578 
579 static inline unsigned int blk_rq_get_max_segments(struct request *rq)
580 {
581 	if (req_op(rq) == REQ_OP_DISCARD)
582 		return queue_max_discard_segments(rq->q);
583 	return queue_max_segments(rq->q);
584 }
585 
586 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
587 						  sector_t offset)
588 {
589 	struct request_queue *q = rq->q;
590 	unsigned int max_sectors;
591 
592 	if (blk_rq_is_passthrough(rq))
593 		return q->limits.max_hw_sectors;
594 
595 	max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
596 	if (!q->limits.chunk_sectors ||
597 	    req_op(rq) == REQ_OP_DISCARD ||
598 	    req_op(rq) == REQ_OP_SECURE_ERASE)
599 		return max_sectors;
600 	return min(max_sectors,
601 		   blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
602 }
603 
604 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
605 		unsigned int nr_phys_segs)
606 {
607 	if (!blk_cgroup_mergeable(req, bio))
608 		goto no_merge;
609 
610 	if (blk_integrity_merge_bio(req->q, req, bio) == false)
611 		goto no_merge;
612 
613 	/* a discard request merge won't add a new segment */
614 	if (req_op(req) == REQ_OP_DISCARD)
615 		return 1;
616 
617 	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
618 		goto no_merge;
619 
620 	/*
621 	 * This will form the start of a new hw segment.  Bump the
622 	 * segment counter.
623 	 */
624 	req->nr_phys_segments += nr_phys_segs;
625 	return 1;
626 
627 no_merge:
628 	req_set_nomerge(req->q, req);
629 	return 0;
630 }
631 
632 int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
633 {
634 	if (req_gap_back_merge(req, bio))
635 		return 0;
636 	if (blk_integrity_rq(req) &&
637 	    integrity_req_gap_back_merge(req, bio))
638 		return 0;
639 	if (!bio_crypt_ctx_back_mergeable(req, bio))
640 		return 0;
641 	if (blk_rq_sectors(req) + bio_sectors(bio) >
642 	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
643 		req_set_nomerge(req->q, req);
644 		return 0;
645 	}
646 
647 	return ll_new_hw_segment(req, bio, nr_segs);
648 }
649 
650 static int ll_front_merge_fn(struct request *req, struct bio *bio,
651 		unsigned int nr_segs)
652 {
653 	if (req_gap_front_merge(req, bio))
654 		return 0;
655 	if (blk_integrity_rq(req) &&
656 	    integrity_req_gap_front_merge(req, bio))
657 		return 0;
658 	if (!bio_crypt_ctx_front_mergeable(req, bio))
659 		return 0;
660 	if (blk_rq_sectors(req) + bio_sectors(bio) >
661 	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
662 		req_set_nomerge(req->q, req);
663 		return 0;
664 	}
665 
666 	return ll_new_hw_segment(req, bio, nr_segs);
667 }
668 
669 static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
670 		struct request *next)
671 {
672 	unsigned short segments = blk_rq_nr_discard_segments(req);
673 
674 	if (segments >= queue_max_discard_segments(q))
675 		goto no_merge;
676 	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
677 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
678 		goto no_merge;
679 
680 	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
681 	return true;
682 no_merge:
683 	req_set_nomerge(q, req);
684 	return false;
685 }
686 
687 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
688 				struct request *next)
689 {
690 	int total_phys_segments;
691 
692 	if (req_gap_back_merge(req, next->bio))
693 		return 0;
694 
695 	/*
696 	 * Will it become too large?
697 	 */
698 	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
699 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
700 		return 0;
701 
702 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
703 	if (total_phys_segments > blk_rq_get_max_segments(req))
704 		return 0;
705 
706 	if (!blk_cgroup_mergeable(req, next->bio))
707 		return 0;
708 
709 	if (blk_integrity_merge_rq(q, req, next) == false)
710 		return 0;
711 
712 	if (!bio_crypt_ctx_merge_rq(req, next))
713 		return 0;
714 
715 	/* Merge is OK... */
716 	req->nr_phys_segments = total_phys_segments;
717 	return 1;
718 }
719 
720 /**
721  * blk_rq_set_mixed_merge - mark a request as mixed merge
722  * @rq: request to mark as mixed merge
723  *
724  * Description:
725  *     @rq is about to be mixed merged.  Make sure the attributes
726  *     which can be mixed are set in each bio and mark @rq as mixed
727  *     merged.
728  */
729 void blk_rq_set_mixed_merge(struct request *rq)
730 {
731 	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
732 	struct bio *bio;
733 
734 	if (rq->rq_flags & RQF_MIXED_MERGE)
735 		return;
736 
737 	/*
738 	 * @rq will no longer represent mixable attributes for all the
739 	 * contained bios.  It will just track those of the first one.
740 	 * Distribute the attributes to each bio.
741 	 */
742 	for (bio = rq->bio; bio; bio = bio->bi_next) {
743 		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
744 			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
745 		bio->bi_opf |= ff;
746 	}
747 	rq->rq_flags |= RQF_MIXED_MERGE;
748 }
749 
750 static void blk_account_io_merge_request(struct request *req)
751 {
752 	if (blk_do_io_stat(req)) {
753 		part_stat_lock();
754 		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
755 		part_stat_unlock();
756 	}
757 }
758 
759 static enum elv_merge blk_try_req_merge(struct request *req,
760 					struct request *next)
761 {
762 	if (blk_discard_mergable(req))
763 		return ELEVATOR_DISCARD_MERGE;
764 	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
765 		return ELEVATOR_BACK_MERGE;
766 
767 	return ELEVATOR_NO_MERGE;
768 }
769 
770 /*
771  * For non-mq, this has to be called with the request spinlock acquired.
772  * For mq with scheduling, the appropriate queue wide lock should be held.
773  */
774 static struct request *attempt_merge(struct request_queue *q,
775 				     struct request *req, struct request *next)
776 {
777 	if (!rq_mergeable(req) || !rq_mergeable(next))
778 		return NULL;
779 
780 	if (req_op(req) != req_op(next))
781 		return NULL;
782 
783 	if (rq_data_dir(req) != rq_data_dir(next))
784 		return NULL;
785 
786 	if (req->ioprio != next->ioprio)
787 		return NULL;
788 
789 	/*
790 	 * If we are allowed to merge, then append the bio list from
791 	 * 'next' to 'rq' and release 'next'. ll_merge_requests_fn()
792 	 * will have updated the segment counts; update the sector
793 	 * counts here. Handle DISCARDs separately, as they have
794 	 * separate settings.
795 	 */
796 
797 	switch (blk_try_req_merge(req, next)) {
798 	case ELEVATOR_DISCARD_MERGE:
799 		if (!req_attempt_discard_merge(q, req, next))
800 			return NULL;
801 		break;
802 	case ELEVATOR_BACK_MERGE:
803 		if (!ll_merge_requests_fn(q, req, next))
804 			return NULL;
805 		break;
806 	default:
807 		return NULL;
808 	}
809 
810 	/*
811 	 * If failfast settings disagree or any of the two is already
812 	 * a mixed merge, mark both as mixed before proceeding.  This
813 	 * makes sure that all involved bios have mixable attributes
814 	 * set properly.
815 	 */
816 	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
817 	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
818 	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
819 		blk_rq_set_mixed_merge(req);
820 		blk_rq_set_mixed_merge(next);
821 	}
822 
823 	/*
824 	 * At this point we have either done a back merge or front merge. We
825 	 * need the smaller start_time_ns of the merged requests to be the
826 	 * current request for accounting purposes.
827 	 */
828 	if (next->start_time_ns < req->start_time_ns)
829 		req->start_time_ns = next->start_time_ns;
830 
831 	req->biotail->bi_next = next->bio;
832 	req->biotail = next->biotail;
833 
834 	req->__data_len += blk_rq_bytes(next);
835 
836 	if (!blk_discard_mergable(req))
837 		elv_merge_requests(q, req, next);
838 
839 	/*
840 	 * 'next' is going away, so update stats accordingly
841 	 */
842 	blk_account_io_merge_request(next);
843 
844 	trace_block_rq_merge(next);
845 
846 	/*
847 	 * ownership of the bios has passed from 'next' to 'req'; return
848 	 * 'next' for the caller to free
849 	 */
850 	next->bio = NULL;
851 	return next;
852 }
853 
854 static struct request *attempt_back_merge(struct request_queue *q,
855 		struct request *rq)
856 {
857 	struct request *next = elv_latter_request(q, rq);
858 
859 	if (next)
860 		return attempt_merge(q, rq, next);
861 
862 	return NULL;
863 }
864 
865 static struct request *attempt_front_merge(struct request_queue *q,
866 		struct request *rq)
867 {
868 	struct request *prev = elv_former_request(q, rq);
869 
870 	if (prev)
871 		return attempt_merge(q, prev, rq);
872 
873 	return NULL;
874 }
875 
876 /*
877  * Try to merge 'next' into 'rq'. Return true if the merge happened, false
878  * otherwise. The caller is responsible for freeing 'next' if the merge
879  * happened.
880  */
881 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
882 			   struct request *next)
883 {
884 	return attempt_merge(q, rq, next);
885 }
886 
887 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
888 {
889 	if (!rq_mergeable(rq) || !bio_mergeable(bio))
890 		return false;
891 
892 	if (req_op(rq) != bio_op(bio))
893 		return false;
894 
895 	/* different data direction or already started, don't merge */
896 	if (bio_data_dir(bio) != rq_data_dir(rq))
897 		return false;
898 
899 	/* don't merge across cgroup boundaries */
900 	if (!blk_cgroup_mergeable(rq, bio))
901 		return false;
902 
903 	/* only merge an integrity-protected bio into a likewise protected rq */
904 	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
905 		return false;
906 
907 	/* Only merge if the crypt contexts are compatible */
908 	if (!bio_crypt_rq_ctx_compatible(rq, bio))
909 		return false;
910 
911 	if (rq->ioprio != bio_prio(bio))
912 		return false;
913 
914 	return true;
915 }
916 
917 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
918 {
919 	if (blk_discard_mergable(rq))
920 		return ELEVATOR_DISCARD_MERGE;
921 	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
922 		return ELEVATOR_BACK_MERGE;
923 	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
924 		return ELEVATOR_FRONT_MERGE;
925 	return ELEVATOR_NO_MERGE;
926 }
927 
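/*
 * Editor's sketch (not part of the kernel source): the position checks
 * above. A back merge appends a bio that starts exactly where the request
 * ends; a front merge prepends a bio that ends exactly where the request
 * starts. Standalone userspace program.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

int main(void)
{
	sector_t rq_pos = 1000, rq_sectors = 8;
	sector_t bio_pos = 1008, bio_sectors = 8;

	bool back = rq_pos + rq_sectors == bio_pos;
	bool front = rq_pos - bio_sectors == bio_pos;

	printf("back=%d front=%d\n", back, front);	/* back=1 front=0 */
	return 0;
}
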
928 static void blk_account_io_merge_bio(struct request *req)
929 {
930 	if (!blk_do_io_stat(req))
931 		return;
932 
933 	part_stat_lock();
934 	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
935 	part_stat_unlock();
936 }
937 
938 enum bio_merge_status {
939 	BIO_MERGE_OK,
940 	BIO_MERGE_NONE,
941 	BIO_MERGE_FAILED,
942 };
943 
944 static enum bio_merge_status bio_attempt_back_merge(struct request *req,
945 		struct bio *bio, unsigned int nr_segs)
946 {
947 	const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;
948 
949 	if (!ll_back_merge_fn(req, bio, nr_segs))
950 		return BIO_MERGE_FAILED;
951 
952 	trace_block_bio_backmerge(bio);
953 	rq_qos_merge(req->q, req, bio);
954 
955 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
956 		blk_rq_set_mixed_merge(req);
957 
958 	req->biotail->bi_next = bio;
959 	req->biotail = bio;
960 	req->__data_len += bio->bi_iter.bi_size;
961 
962 	bio_crypt_free_ctx(bio);
963 
964 	blk_account_io_merge_bio(req);
965 	return BIO_MERGE_OK;
966 }
967 
968 static enum bio_merge_status bio_attempt_front_merge(struct request *req,
969 		struct bio *bio, unsigned int nr_segs)
970 {
971 	const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;
972 
973 	if (!ll_front_merge_fn(req, bio, nr_segs))
974 		return BIO_MERGE_FAILED;
975 
976 	trace_block_bio_frontmerge(bio);
977 	rq_qos_merge(req->q, req, bio);
978 
979 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
980 		blk_rq_set_mixed_merge(req);
981 
982 	bio->bi_next = req->bio;
983 	req->bio = bio;
984 
985 	req->__sector = bio->bi_iter.bi_sector;
986 	req->__data_len += bio->bi_iter.bi_size;
987 
988 	bio_crypt_do_front_merge(req, bio);
989 
990 	blk_account_io_merge_bio(req);
991 	return BIO_MERGE_OK;
992 }
993 
994 static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
995 		struct request *req, struct bio *bio)
996 {
997 	unsigned short segments = blk_rq_nr_discard_segments(req);
998 
999 	if (segments >= queue_max_discard_segments(q))
1000 		goto no_merge;
1001 	if (blk_rq_sectors(req) + bio_sectors(bio) >
1002 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
1003 		goto no_merge;
1004 
1005 	rq_qos_merge(q, req, bio);
1006 
1007 	req->biotail->bi_next = bio;
1008 	req->biotail = bio;
1009 	req->__data_len += bio->bi_iter.bi_size;
1010 	req->nr_phys_segments = segments + 1;
1011 
1012 	blk_account_io_merge_bio(req);
1013 	return BIO_MERGE_OK;
1014 no_merge:
1015 	req_set_nomerge(q, req);
1016 	return BIO_MERGE_FAILED;
1017 }
1018 
1019 static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
1020 						   struct request *rq,
1021 						   struct bio *bio,
1022 						   unsigned int nr_segs,
1023 						   bool sched_allow_merge)
1024 {
1025 	if (!blk_rq_merge_ok(rq, bio))
1026 		return BIO_MERGE_NONE;
1027 
1028 	switch (blk_try_merge(rq, bio)) {
1029 	case ELEVATOR_BACK_MERGE:
1030 		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1031 			return bio_attempt_back_merge(rq, bio, nr_segs);
1032 		break;
1033 	case ELEVATOR_FRONT_MERGE:
1034 		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1035 			return bio_attempt_front_merge(rq, bio, nr_segs);
1036 		break;
1037 	case ELEVATOR_DISCARD_MERGE:
1038 		return bio_attempt_discard_merge(q, rq, bio);
1039 	default:
1040 		return BIO_MERGE_NONE;
1041 	}
1042 
1043 	return BIO_MERGE_FAILED;
1044 }
1045 
1046 /**
1047  * blk_attempt_plug_merge - try to merge with %current's plugged list
1048  * @q: request_queue new bio is being queued at
1049  * @bio: new bio being queued
1050  * @nr_segs: number of segments in @bio
1052  *
1053  * Determine whether @bio being queued on @q can be merged with the previous
1054  * request on %current's plugged list.  Returns %true if merge was successful,
1055  * otherwise %false.
1056  *
1057  * Plugging coalesces IOs from the same issuer for the same purpose without
1058  * going through @q->queue_lock.  As such it's more of an issuing mechanism
1059  * than scheduling, and the request, while it may have elvpriv data, is
1060  * not added to the elevator at this point.  In addition, we don't have
1061  * reliable access to the elevator outside queue lock.  Only check basic
1062  * merging parameters without querying the elevator.
1063  *
1064  * Caller must ensure !blk_queue_nomerges(q) beforehand.
1065  */
1066 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1067 		unsigned int nr_segs)
1068 {
1069 	struct blk_plug *plug;
1070 	struct request *rq;
1071 
1072 	plug = blk_mq_plug(bio);
1073 	if (!plug || rq_list_empty(plug->mq_list))
1074 		return false;
1075 
1076 	rq_list_for_each(&plug->mq_list, rq) {
1077 		if (rq->q == q) {
1078 			if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
1079 			    BIO_MERGE_OK)
1080 				return true;
1081 			break;
1082 		}
1083 
1084 		/*
1085 		 * Only keep iterating plug list for merges if we have multiple
1086 		 * queues
1087 		 */
1088 		if (!plug->multiple_queues)
1089 			break;
1090 	}
1091 	return false;
1092 }
1093 
1094 /*
1095  * Iterate list of requests and see if we can merge this bio with any
1096  * of them.
1097  */
1098 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
1099 			struct bio *bio, unsigned int nr_segs)
1100 {
1101 	struct request *rq;
1102 	int checked = 8;
1103 
1104 	list_for_each_entry_reverse(rq, list, queuelist) {
1105 		if (!checked--)
1106 			break;
1107 
1108 		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
1109 		case BIO_MERGE_NONE:
1110 			continue;
1111 		case BIO_MERGE_OK:
1112 			return true;
1113 		case BIO_MERGE_FAILED:
1114 			return false;
1115 		}
1116 
1117 	}
1118 
1119 	return false;
1120 }
1121 EXPORT_SYMBOL_GPL(blk_bio_list_merge);
1122 
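/*
 * Editor's sketch (not part of the kernel source): the bounded reverse
 * scan above ("checked = 8") as a standalone userspace program; only the
 * eight most recently added candidates are examined before giving up.
 */
#include <stdio.h>

int main(void)
{
	int reqs[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
	int n = sizeof(reqs) / sizeof(reqs[0]);
	int checked = 8;
	int i;

	for (i = n - 1; i >= 0; i--) {
		if (!checked--)
			break;
		printf("candidate %d\n", reqs[i]);	/* 12 down to 5 */
	}
	return 0;
}
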
1123 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
1124 		unsigned int nr_segs, struct request **merged_request)
1125 {
1126 	struct request *rq;
1127 
1128 	switch (elv_merge(q, &rq, bio)) {
1129 	case ELEVATOR_BACK_MERGE:
1130 		if (!blk_mq_sched_allow_merge(q, rq, bio))
1131 			return false;
1132 		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1133 			return false;
1134 		*merged_request = attempt_back_merge(q, rq);
1135 		if (!*merged_request)
1136 			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
1137 		return true;
1138 	case ELEVATOR_FRONT_MERGE:
1139 		if (!blk_mq_sched_allow_merge(q, rq, bio))
1140 			return false;
1141 		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1142 			return false;
1143 		*merged_request = attempt_front_merge(q, rq);
1144 		if (!*merged_request)
1145 			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
1146 		return true;
1147 	case ELEVATOR_DISCARD_MERGE:
1148 		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
1149 	default:
1150 		return false;
1151 	}
1152 }
1153 EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
1154