xref: /openbmc/linux/block/blk-merge.c (revision a85b3637)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Functions related to segment and merge handling
4  */
5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/bio.h>
8 #include <linux/blkdev.h>
9 #include <linux/blk-integrity.h>
10 #include <linux/scatterlist.h>
11 #include <linux/part_stat.h>
12 #include <linux/blk-cgroup.h>
13 
14 #include <trace/events/block.h>
15 
16 #include "blk.h"
17 #include "blk-mq-sched.h"
18 #include "blk-rq-qos.h"
19 #include "blk-throttle.h"
20 
21 static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
22 {
23 	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
24 }
25 
26 static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
27 {
28 	struct bvec_iter iter = bio->bi_iter;
29 	int idx;
30 
31 	bio_get_first_bvec(bio, bv);
32 	if (bv->bv_len == bio->bi_iter.bi_size)
33 		return;		/* this bio only has a single bvec */
34 
35 	bio_advance_iter(bio, &iter, iter.bi_size);
36 
37 	if (!iter.bi_bvec_done)
38 		idx = iter.bi_idx - 1;
39 	else	/* in the middle of bvec */
40 		idx = iter.bi_idx;
41 
42 	*bv = bio->bi_io_vec[idx];
43 
44 	/*
45 	 * iter.bi_bvec_done records actual length of the last bvec
46 	 * if this bio ends in the middle of one io vector
47 	 */
48 	if (iter.bi_bvec_done)
49 		bv->bv_len = iter.bi_bvec_done;
50 }
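
/*
 * Illustrative sketch (standalone user-space C, not part of blk-merge.c):
 * finding the last bvec of a bio whose bi_size ends in the middle of an
 * io vector, modelled with a plain array.  The struct and the sizes
 * below are made-up example values.
 */
#include <stdio.h>

struct ex_bvec { unsigned int len; };

int main(void)
{
	struct ex_bvec vec[] = { { 4096 }, { 8192 }, { 4096 } };
	unsigned int bi_size = 4096 + 8192 + 1024;	/* ends inside vec[2] */
	unsigned int i, done = 0, last_len = 0;

	for (i = 0; i < 3 && done < bi_size; i++) {
		last_len = vec[i].len < bi_size - done ?
				vec[i].len : bi_size - done;
		done += last_len;
	}
	/* prints "last bvec 2, effective length 1024" */
	printf("last bvec %u, effective length %u\n", i - 1, last_len);
	return 0;
}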
51 
52 static inline bool bio_will_gap(struct request_queue *q,
53 		struct request *prev_rq, struct bio *prev, struct bio *next)
54 {
55 	struct bio_vec pb, nb;
56 
57 	if (!bio_has_data(prev) || !queue_virt_boundary(q))
58 		return false;
59 
60 	/*
61 	 * Don't merge if the first bio starts at a non-zero offset, otherwise it
62 	 * is quite difficult to respect the sg gap limit.  We work hard to
63 	 * merge a huge number of small single bios, e.g. in the mkfs case.
64 	 */
65 	if (prev_rq)
66 		bio_get_first_bvec(prev_rq->bio, &pb);
67 	else
68 		bio_get_first_bvec(prev, &pb);
69 	if (pb.bv_offset & queue_virt_boundary(q))
70 		return true;
71 
72 	/*
73 	 * We don't need to worry about the situation that the merged segment
74 	 * ends in unaligned virt boundary:
75 	 *
76 	 * - if 'pb' ends aligned, the merged segment ends aligned
77 	 * - if 'pb' ends unaligned, the next bio must include
78 	 *   a single bvec 'nb', otherwise 'nb' can't
79 	 *   merge with 'pb'
80 	 */
81 	bio_get_last_bvec(prev, &pb);
82 	bio_get_first_bvec(next, &nb);
83 	if (biovec_phys_mergeable(q, &pb, &nb))
84 		return false;
85 	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
86 }
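
/*
 * Illustrative sketch (hypothetical helper, user-space C): the
 * virt-boundary test used above reduces to simple mask arithmetic.  With
 * a boundary mask of e.g. 0xfff, two segments may only be merged if the
 * first one ends exactly on the boundary and the second one starts on it.
 */
#include <stdbool.h>
#include <stdint.h>

static bool virt_gap(uint32_t boundary_mask,
		     uint32_t prev_offset, uint32_t prev_len,
		     uint32_t next_offset)
{
	/* gap if the previous segment does not end on the boundary ... */
	if ((prev_offset + prev_len) & boundary_mask)
		return true;
	/* ... or the next segment does not start on it */
	return next_offset & boundary_mask;
}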
87 
88 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
89 {
90 	return bio_will_gap(req->q, req, req->biotail, bio);
91 }
92 
93 static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
94 {
95 	return bio_will_gap(req->q, NULL, bio, req->bio);
96 }
97 
98 static struct bio *bio_split_discard(struct bio *bio, struct request_queue *q,
99 		unsigned *nsegs, struct bio_set *bs)
100 {
101 	unsigned int max_discard_sectors, granularity;
102 	int alignment;
103 	sector_t tmp;
104 	unsigned split_sectors;
105 
106 	*nsegs = 1;
107 
108 	/* Zero-sector (unknown) and one-sector granularities are the same.  */
109 	granularity = max(q->limits.discard_granularity >> 9, 1U);
110 
111 	max_discard_sectors = min(q->limits.max_discard_sectors,
112 			bio_allowed_max_sectors(q));
113 	max_discard_sectors -= max_discard_sectors % granularity;
114 
115 	if (unlikely(!max_discard_sectors)) {
116 		/* XXX: warn */
117 		return NULL;
118 	}
119 
120 	if (bio_sectors(bio) <= max_discard_sectors)
121 		return NULL;
122 
123 	split_sectors = max_discard_sectors;
124 
125 	/*
126 	 * If the next starting sector would be misaligned, stop the discard at
127 	 * the previous aligned sector.
128 	 */
129 	alignment = (q->limits.discard_alignment >> 9) % granularity;
130 
131 	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
132 	tmp = sector_div(tmp, granularity);
133 
134 	if (split_sectors > tmp)
135 		split_sectors -= tmp;
136 
137 	return bio_split(bio, split_sectors, GFP_NOIO, bs);
138 }
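
/*
 * Illustrative sketch (standalone C, example limits): the discard split
 * arithmetic above, with sector_div() modelled by the % operator.  The
 * 8 MiB max discard, 1 MiB granularity and zero alignment offset are
 * made-up values.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bi_sector = 12345;		/* start of the bio */
	unsigned int granularity = 2048;	/* 1 MiB in 512-byte sectors */
	unsigned int max_discard = 16384;	/* 8 MiB, multiple of granularity */
	unsigned int alignment = 0;		/* discard_alignment % granularity */
	unsigned int split = max_discard;
	uint64_t rem = (bi_sector + split - alignment) % granularity;

	/* stop the discard at the previous aligned sector */
	if (split > rem)
		split -= rem;

	/* prints "split 16327 sectors, remainder starts at 28672" */
	printf("split %u sectors, remainder starts at %llu\n",
	       split, (unsigned long long)(bi_sector + split));
	return 0;
}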
139 
140 static struct bio *bio_split_write_zeroes(struct bio *bio,
141 		struct request_queue *q, unsigned *nsegs, struct bio_set *bs)
142 {
143 	*nsegs = 0;
144 
145 	if (!q->limits.max_write_zeroes_sectors)
146 		return NULL;
147 
148 	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
149 		return NULL;
150 
151 	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
152 }
153 
154 /*
155  * Return the maximum number of sectors from the start of a bio that may be
156  * submitted as a single request to a block device. If enough sectors remain,
157  * align the end to the physical block size. Otherwise align the end to the
158  * logical block size. This approach minimizes the number of non-aligned
159  * requests that are submitted to a block device if the start of a bio is not
160  * aligned to a physical block boundary.
161  */
162 static inline unsigned get_max_io_size(struct bio *bio,
163 		struct request_queue *q)
164 {
165 	unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
166 	unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
167 	unsigned max_sectors = queue_max_sectors(q), start, end;
168 
169 	if (q->limits.chunk_sectors) {
170 		max_sectors = min(max_sectors,
171 			blk_chunk_sectors_left(bio->bi_iter.bi_sector,
172 					       q->limits.chunk_sectors));
173 	}
174 
175 	start = bio->bi_iter.bi_sector & (pbs - 1);
176 	end = (start + max_sectors) & ~(pbs - 1);
177 	if (end > start)
178 		return end - start;
179 	return max_sectors & ~(lbs - 1);
180 }
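
/*
 * Illustrative sketch (hypothetical helper, sizes in sectors): the
 * alignment performed by get_max_io_size().  pbs/lbs must be powers of
 * two; e.g. pbs = 8 (4 KiB), lbs = 1, bi_sector = 3, max_sectors = 1280
 * yields 1277, so the split ends on the physical block boundary at
 * sector 1280.
 */
#include <stdint.h>

static unsigned int max_io_sectors(uint64_t bi_sector, unsigned int max_sectors,
				   unsigned int pbs, unsigned int lbs)
{
	unsigned int start = bi_sector & (pbs - 1);
	unsigned int end = (start + max_sectors) & ~(pbs - 1);

	if (end > start)
		return end - start;
	/* less than one physical block fits: fall back to logical blocks */
	return max_sectors & ~(lbs - 1);
}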
181 
182 static inline unsigned get_max_segment_size(const struct request_queue *q,
183 					    struct page *start_page,
184 					    unsigned long offset)
185 {
186 	unsigned long mask = queue_segment_boundary(q);
187 
188 	offset = mask & (page_to_phys(start_page) + offset);
189 
190 	/*
191 	 * An overflow may be triggered if the page's physical address is zero
192 	 * on a 32-bit arch; use the queue's max segment size when that happens.
193 	 */
194 	return min_not_zero(mask - offset + 1,
195 			(unsigned long)queue_max_segment_size(q));
196 }
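
/*
 * Illustrative sketch (hypothetical helper): the segment-boundary clamp
 * above in plain C.  The largest segment starting at a given physical
 * address is mask - (phys & mask) + 1, which wraps to 0 when the masked
 * address is 0 and mask is ~0 on a 32-bit arch - hence the
 * min_not_zero() above.
 */
#include <stdint.h>

static uint32_t max_seg_size(uint32_t boundary_mask, uint32_t queue_max_seg,
			     uint32_t phys)
{
	uint32_t off = phys & boundary_mask;
	uint32_t len = boundary_mask - off + 1;		/* may wrap to 0 */

	/* min_not_zero(len, queue_max_seg) */
	return len ? (len < queue_max_seg ? len : queue_max_seg) : queue_max_seg;
}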
197 
198 /**
199  * bvec_split_segs - verify whether or not a bvec should be split in the middle
200  * @q:        [in] request queue associated with the bio associated with @bv
201  * @bv:       [in] bvec to examine
202  * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
203  *            by the number of segments from @bv that may be appended to that
204  *            bio without exceeding @max_segs
205  * @bytes:    [in,out] Number of bytes in the bio being built. Incremented
206  *            by the number of bytes from @bv that may be appended to that
207  *            bio without exceeding @max_bytes
208  * @max_segs: [in] upper bound for *@nsegs
209  * @max_bytes: [in] upper bound for *@bytes
210  *
211  * When splitting a bio, it can happen that a bvec is encountered that is too
212  * big to fit in a single segment and hence that it has to be split in the
213  * middle. This function verifies whether or not that should happen. The value
214  * %true is returned if and only if appending the entire @bv to a bio with
215  * *@nsegs segments and *@bytes bytes would make that bio unacceptable for
216  * the block driver.
217  */
218 static bool bvec_split_segs(const struct request_queue *q,
219 			    const struct bio_vec *bv, unsigned *nsegs,
220 			    unsigned *bytes, unsigned max_segs,
221 			    unsigned max_bytes)
222 {
223 	unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
224 	unsigned len = min(bv->bv_len, max_len);
225 	unsigned total_len = 0;
226 	unsigned seg_size = 0;
227 
228 	while (len && *nsegs < max_segs) {
229 		seg_size = get_max_segment_size(q, bv->bv_page,
230 						bv->bv_offset + total_len);
231 		seg_size = min(seg_size, len);
232 
233 		(*nsegs)++;
234 		total_len += seg_size;
235 		len -= seg_size;
236 
237 		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
238 			break;
239 	}
240 
241 	*bytes += total_len;
242 
243 	/* tell the caller to split the bvec if it is too big to fit */
244 	return len > 0 || bv->bv_len > max_len;
245 }
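
/*
 * Illustrative sketch (standalone C, made-up queue limits): how one big
 * bvec is accounted as multiple hardware segments.  A 256 KiB bvec with
 * a 64 KiB max segment size becomes four segments; anything that does
 * not fit would force the caller to split the bvec.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int bv_len = 256 * 1024;
	unsigned int max_seg_size = 64 * 1024;
	unsigned int max_segs = 128, max_bytes = 1024 * 1024;
	unsigned int nsegs = 0, bytes = 0;
	unsigned int max_len = max_bytes - bytes;
	unsigned int len = bv_len < max_len ? bv_len : max_len;

	while (len && nsegs < max_segs) {
		unsigned int seg = len < max_seg_size ? len : max_seg_size;

		nsegs++;
		bytes += seg;
		len -= seg;
	}

	/* the caller must split the bvec if anything is left over */
	bool must_split = len > 0 || bv_len > max_len;

	/* prints "4 segments, 262144 bytes, split needed: 0" */
	printf("%u segments, %u bytes, split needed: %d\n",
	       nsegs, bytes, must_split);
	return 0;
}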
246 
247 /**
248  * bio_split_rw - split a bio into two bios
249  * @bio:  [in] bio to be split
250  * @q:    [in] request queue pointer
251  * @segs: [out] number of segments in the bio with the first half of the sectors
252  * @bs:	  [in] bio set to allocate the clone from
253  * @max_bytes: [in] maximum number of bytes per bio
254  *
255  * Clone @bio, update the bi_iter of the clone to represent the first sectors
256  * of @bio and update @bio->bi_iter to represent the remaining sectors. The
257  * following is guaranteed for the cloned bio:
258  * - That it has at most @max_bytes worth of data
259  * - That it has at most queue_max_segments(@q) segments.
260  *
261  * Except for discard requests the cloned bio will point at the bi_io_vec of
262  * the original bio. It is the responsibility of the caller to ensure that the
263  * original bio is not freed before the cloned bio. The caller is also
264  * responsible for ensuring that @bs is only destroyed after processing of the
265  * split bio has finished.
266  */
267 static struct bio *bio_split_rw(struct bio *bio, struct request_queue *q,
268 		unsigned *segs, struct bio_set *bs, unsigned max_bytes)
269 {
270 	struct bio_vec bv, bvprv, *bvprvp = NULL;
271 	struct bvec_iter iter;
272 	unsigned nsegs = 0, bytes = 0;
273 	const unsigned max_segs = queue_max_segments(q);
274 
275 	bio_for_each_bvec(bv, bio, iter) {
276 		/*
277 		 * If the queue doesn't support SG gaps and adding this
278 		 * offset would create a gap, disallow it.
279 		 */
280 		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
281 			goto split;
282 
283 		if (nsegs < max_segs &&
284 		    bytes + bv.bv_len <= max_bytes &&
285 		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
286 			nsegs++;
287 			bytes += bv.bv_len;
288 		} else if (bvec_split_segs(q, &bv, &nsegs, &bytes, max_segs,
289 					   max_bytes)) {
290 			goto split;
291 		}
292 
293 		bvprv = bv;
294 		bvprvp = &bvprv;
295 	}
296 
297 	*segs = nsegs;
298 	return NULL;
299 split:
300 	*segs = nsegs;
301 
302 	/*
303 	 * Individual bvecs might not be logical block aligned. Round down the
304 	 * split size so that each bio is properly block size aligned, even if
305 	 * we do not use the full hardware limits.
306 	 */
307 	bytes = ALIGN_DOWN(bytes, queue_logical_block_size(q));
308 
309 	/*
310 	 * Bio splitting may cause subtle trouble such as hangs when doing sync
311 	 * iopoll in the direct I/O path. Given that the performance gain of
312 	 * iopoll for big I/O can be trivial, disable iopoll when a split is needed.
313 	 */
314 	bio_clear_polled(bio);
315 	return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
316 }
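
/*
 * Illustrative sketch (hypothetical helper): the ALIGN_DOWN() step above.
 * With 4096-byte logical blocks, a candidate split point of 71168 bytes
 * is rounded down to 69632 so that the first half of the bio stays
 * logical-block aligned.
 */
#include <stdint.h>

static inline uint32_t align_down(uint32_t bytes, uint32_t lbs)
{
	/* lbs is a power of two, as logical block sizes always are */
	return bytes & ~(lbs - 1);
}

/* align_down(71168, 4096) == 69632, i.e. 17 full logical blocks */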
317 
318 /**
319  * __bio_split_to_limits - split a bio to fit the queue limits
320  * @bio:     bio to be split
321  * @q:       request_queue new bio is being queued at
322  * @nr_segs: returns the number of segments in the returned bio
323  *
324  * Check if @bio needs splitting based on the queue limits, and if so split off
325  * a bio fitting the limits from the beginning of @bio and return it.  @bio is
326  * shortened to the remainder and re-submitted.
327  *
328  * The split bio is allocated from @bio->bi_bdev->bd_disk->bio_split, which
329  * is provided by the block layer.
330  */
331 struct bio *__bio_split_to_limits(struct bio *bio, struct request_queue *q,
332 		       unsigned int *nr_segs)
333 {
334 	struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
335 	struct bio *split;
336 
337 	switch (bio_op(bio)) {
338 	case REQ_OP_DISCARD:
339 	case REQ_OP_SECURE_ERASE:
340 		split = bio_split_discard(bio, q, nr_segs, bs);
341 		break;
342 	case REQ_OP_WRITE_ZEROES:
343 		split = bio_split_write_zeroes(bio, q, nr_segs, bs);
344 		break;
345 	default:
346 		split = bio_split_rw(bio, q, nr_segs, bs,
347 				get_max_io_size(bio, q) << SECTOR_SHIFT);
348 		break;
349 	}
350 
351 	if (split) {
352 		/* there is no chance to merge the split bio */
353 		split->bi_opf |= REQ_NOMERGE;
354 
355 		blkcg_bio_issue_init(split);
356 		bio_chain(split, bio);
357 		trace_block_split(split, bio->bi_iter.bi_sector);
358 		submit_bio_noacct(bio);
359 		return split;
360 	}
361 	return bio;
362 }
363 
364 /**
365  * bio_split_to_limits - split a bio to fit the queue limits
366  * @bio:     bio to be split
367  *
368  * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
369  * if so split off a bio fitting the limits from the beginning of @bio and
370  * return it.  @bio is shortened to the remainder and re-submitted.
371  *
372  * The split bio is allocated from @bio->bi_bdev->bd_disk->bio_split, which
373  * is provided by the block layer.
374  */
375 struct bio *bio_split_to_limits(struct bio *bio)
376 {
377 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
378 	unsigned int nr_segs;
379 
380 	if (bio_may_exceed_limits(bio, q))
381 		return __bio_split_to_limits(bio, q, &nr_segs);
382 	return bio;
383 }
384 EXPORT_SYMBOL(bio_split_to_limits);
385 
386 unsigned int blk_recalc_rq_segments(struct request *rq)
387 {
388 	unsigned int nr_phys_segs = 0;
389 	unsigned int bytes = 0;
390 	struct req_iterator iter;
391 	struct bio_vec bv;
392 
393 	if (!rq->bio)
394 		return 0;
395 
396 	switch (bio_op(rq->bio)) {
397 	case REQ_OP_DISCARD:
398 	case REQ_OP_SECURE_ERASE:
399 		if (queue_max_discard_segments(rq->q) > 1) {
400 			struct bio *bio = rq->bio;
401 
402 			for_each_bio(bio)
403 				nr_phys_segs++;
404 			return nr_phys_segs;
405 		}
406 		return 1;
407 	case REQ_OP_WRITE_ZEROES:
408 		return 0;
409 	default:
410 		break;
411 	}
412 
413 	rq_for_each_bvec(bv, rq, iter)
414 		bvec_split_segs(rq->q, &bv, &nr_phys_segs, &bytes,
415 				UINT_MAX, UINT_MAX);
416 	return nr_phys_segs;
417 }
418 
419 static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
420 		struct scatterlist *sglist)
421 {
422 	if (!*sg)
423 		return sglist;
424 
425 	/*
426 	 * If the driver previously mapped a shorter list, we could see a
427 	 * termination bit prematurely unless it fully inits the sg table
428 	 * on each mapping. We KNOW that there must be more entries here
429 	 * or the driver would be buggy, so force clear the termination bit
430 	 * to avoid doing a full sg_init_table() in drivers for each command.
431 	 */
432 	sg_unmark_end(*sg);
433 	return sg_next(*sg);
434 }
435 
436 static unsigned blk_bvec_map_sg(struct request_queue *q,
437 		struct bio_vec *bvec, struct scatterlist *sglist,
438 		struct scatterlist **sg)
439 {
440 	unsigned nbytes = bvec->bv_len;
441 	unsigned nsegs = 0, total = 0;
442 
443 	while (nbytes > 0) {
444 		unsigned offset = bvec->bv_offset + total;
445 		unsigned len = min(get_max_segment_size(q, bvec->bv_page,
446 					offset), nbytes);
447 		struct page *page = bvec->bv_page;
448 
449 		/*
450 		 * Unfortunately a fair number of drivers barf on scatterlists
451 		 * that have an offset larger than PAGE_SIZE, despite other
452 		 * subsystems dealing with that invariant just fine.  For now
453 		 * stick to the legacy format where we never present those from
454 		 * the block layer, but the code below should be removed once
455 		 * these offenders (mostly MMC/SD drivers) are fixed.
456 		 */
457 		page += (offset >> PAGE_SHIFT);
458 		offset &= ~PAGE_MASK;
459 
460 		*sg = blk_next_sg(sg, sglist);
461 		sg_set_page(*sg, page, len, offset);
462 
463 		total += len;
464 		nbytes -= len;
465 		nsegs++;
466 	}
467 
468 	return nsegs;
469 }
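
/*
 * Illustrative sketch (standalone C, example geometry): how a multi-page
 * bvec is chopped into scatterlist entries whose starting page offset
 * never exceeds the page size, as the comment above describes.  The
 * "sg entry" is only printed here; the 8 KiB per-segment limit stands in
 * for get_max_segment_size().
 */
#include <stdio.h>

#define EX_PAGE_SIZE	4096u
#define EX_PAGE_MASK	(EX_PAGE_SIZE - 1)

int main(void)
{
	unsigned int bv_offset = 512, bv_len = 10240;	/* spans three pages */
	unsigned int max_seg = 8192;
	unsigned int total = 0, nsegs = 0;

	while (bv_len > 0) {
		unsigned int offset = bv_offset + total;
		unsigned int len = bv_len < max_seg ? bv_len : max_seg;
		unsigned int page_idx = offset / EX_PAGE_SIZE;

		offset &= EX_PAGE_MASK;
		printf("sg[%u]: page+%u offset=%u len=%u\n",
		       nsegs, page_idx, offset, len);

		total += len;
		bv_len -= len;
		nsegs++;
	}
	return 0;
}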
470 
471 static inline int __blk_bvec_map_sg(struct bio_vec bv,
472 		struct scatterlist *sglist, struct scatterlist **sg)
473 {
474 	*sg = blk_next_sg(sg, sglist);
475 	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
476 	return 1;
477 }
478 
479 /* only try to merge bvecs into one sg if they are from two bios */
480 static inline bool
481 __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
482 			   struct bio_vec *bvprv, struct scatterlist **sg)
483 {
484 
485 	int nbytes = bvec->bv_len;
486 
487 	if (!*sg)
488 		return false;
489 
490 	if ((*sg)->length + nbytes > queue_max_segment_size(q))
491 		return false;
492 
493 	if (!biovec_phys_mergeable(q, bvprv, bvec))
494 		return false;
495 
496 	(*sg)->length += nbytes;
497 
498 	return true;
499 }
500 
501 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
502 			     struct scatterlist *sglist,
503 			     struct scatterlist **sg)
504 {
505 	struct bio_vec bvec, bvprv = { NULL };
506 	struct bvec_iter iter;
507 	int nsegs = 0;
508 	bool new_bio = false;
509 
510 	for_each_bio(bio) {
511 		bio_for_each_bvec(bvec, bio, iter) {
512 			/*
513 			 * Only try to merge bvecs from two different bios, since
514 			 * bvecs within one bio were already merged when its
515 			 * pages were added
516 			 */
517 			if (new_bio &&
518 			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
519 				goto next_bvec;
520 
521 			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
522 				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
523 			else
524 				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
525  next_bvec:
526 			new_bio = false;
527 		}
528 		if (likely(bio->bi_iter.bi_size)) {
529 			bvprv = bvec;
530 			new_bio = true;
531 		}
532 	}
533 
534 	return nsegs;
535 }
536 
537 /*
538  * map a request to scatterlist, return number of sg entries setup. Caller
539  * must make sure sg can hold rq->nr_phys_segments entries
540  */
541 int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
542 		struct scatterlist *sglist, struct scatterlist **last_sg)
543 {
544 	int nsegs = 0;
545 
546 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
547 		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
548 	else if (rq->bio)
549 		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
550 
551 	if (*last_sg)
552 		sg_mark_end(*last_sg);
553 
554 	/*
555 	 * Something must have gone wrong if the computed number of
556 	 * segments is bigger than the request's number of physical segments
557 	 */
558 	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
559 
560 	return nsegs;
561 }
562 EXPORT_SYMBOL(__blk_rq_map_sg);
563 
564 static inline unsigned int blk_rq_get_max_segments(struct request *rq)
565 {
566 	if (req_op(rq) == REQ_OP_DISCARD)
567 		return queue_max_discard_segments(rq->q);
568 	return queue_max_segments(rq->q);
569 }
570 
571 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
572 						  sector_t offset)
573 {
574 	struct request_queue *q = rq->q;
575 	unsigned int max_sectors;
576 
577 	if (blk_rq_is_passthrough(rq))
578 		return q->limits.max_hw_sectors;
579 
580 	max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
581 	if (!q->limits.chunk_sectors ||
582 	    req_op(rq) == REQ_OP_DISCARD ||
583 	    req_op(rq) == REQ_OP_SECURE_ERASE)
584 		return max_sectors;
585 	return min(max_sectors,
586 		   blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
587 }
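
/*
 * Illustrative sketch (hypothetical helpers): the chunk_sectors clamp
 * used above.  For a zoned-style device with power-of-two chunks, an I/O
 * may never cross the next chunk boundary, so the usable length is the
 * smaller of max_sectors and the sectors left in the current chunk.
 */
#include <stdint.h>

static unsigned int chunk_sectors_left(uint64_t offset, unsigned int chunk_sectors)
{
	/* assumes a power-of-two chunk size */
	return chunk_sectors - (offset & (chunk_sectors - 1));
}

static unsigned int clamp_to_chunk(uint64_t offset, unsigned int max_sectors,
				   unsigned int chunk_sectors)
{
	unsigned int left = chunk_sectors_left(offset, chunk_sectors);

	return max_sectors < left ? max_sectors : left;
}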
588 
589 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
590 		unsigned int nr_phys_segs)
591 {
592 	if (!blk_cgroup_mergeable(req, bio))
593 		goto no_merge;
594 
595 	if (blk_integrity_merge_bio(req->q, req, bio) == false)
596 		goto no_merge;
597 
598 	/* a discard request merge won't add a new segment */
599 	if (req_op(req) == REQ_OP_DISCARD)
600 		return 1;
601 
602 	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
603 		goto no_merge;
604 
605 	/*
606 	 * This will form the start of a new hw segment.  Bump both
607 	 * counters.
608 	 */
609 	req->nr_phys_segments += nr_phys_segs;
610 	return 1;
611 
612 no_merge:
613 	req_set_nomerge(req->q, req);
614 	return 0;
615 }
616 
617 int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
618 {
619 	if (req_gap_back_merge(req, bio))
620 		return 0;
621 	if (blk_integrity_rq(req) &&
622 	    integrity_req_gap_back_merge(req, bio))
623 		return 0;
624 	if (!bio_crypt_ctx_back_mergeable(req, bio))
625 		return 0;
626 	if (blk_rq_sectors(req) + bio_sectors(bio) >
627 	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
628 		req_set_nomerge(req->q, req);
629 		return 0;
630 	}
631 
632 	return ll_new_hw_segment(req, bio, nr_segs);
633 }
634 
635 static int ll_front_merge_fn(struct request *req, struct bio *bio,
636 		unsigned int nr_segs)
637 {
638 	if (req_gap_front_merge(req, bio))
639 		return 0;
640 	if (blk_integrity_rq(req) &&
641 	    integrity_req_gap_front_merge(req, bio))
642 		return 0;
643 	if (!bio_crypt_ctx_front_mergeable(req, bio))
644 		return 0;
645 	if (blk_rq_sectors(req) + bio_sectors(bio) >
646 	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
647 		req_set_nomerge(req->q, req);
648 		return 0;
649 	}
650 
651 	return ll_new_hw_segment(req, bio, nr_segs);
652 }
653 
654 static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
655 		struct request *next)
656 {
657 	unsigned short segments = blk_rq_nr_discard_segments(req);
658 
659 	if (segments >= queue_max_discard_segments(q))
660 		goto no_merge;
661 	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
662 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
663 		goto no_merge;
664 
665 	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
666 	return true;
667 no_merge:
668 	req_set_nomerge(q, req);
669 	return false;
670 }
671 
672 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
673 				struct request *next)
674 {
675 	int total_phys_segments;
676 
677 	if (req_gap_back_merge(req, next->bio))
678 		return 0;
679 
680 	/*
681 	 * Will it become too large?
682 	 */
683 	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
684 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
685 		return 0;
686 
687 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
688 	if (total_phys_segments > blk_rq_get_max_segments(req))
689 		return 0;
690 
691 	if (!blk_cgroup_mergeable(req, next->bio))
692 		return 0;
693 
694 	if (blk_integrity_merge_rq(q, req, next) == false)
695 		return 0;
696 
697 	if (!bio_crypt_ctx_merge_rq(req, next))
698 		return 0;
699 
700 	/* Merge is OK... */
701 	req->nr_phys_segments = total_phys_segments;
702 	return 1;
703 }
704 
705 /**
706  * blk_rq_set_mixed_merge - mark a request as mixed merge
707  * @rq: request to mark as mixed merge
708  *
709  * Description:
710  *     @rq is about to be mixed merged.  Make sure the attributes
711  *     which can be mixed are set in each bio and mark @rq as mixed
712  *     merged.
713  */
714 void blk_rq_set_mixed_merge(struct request *rq)
715 {
716 	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
717 	struct bio *bio;
718 
719 	if (rq->rq_flags & RQF_MIXED_MERGE)
720 		return;
721 
722 	/*
723 	 * @rq will no longer represent mixable attributes for all the
724 	 * contained bios.  It will just track those of the first one.
725 	 * Distribute the attributes to each bio.
726 	 */
727 	for (bio = rq->bio; bio; bio = bio->bi_next) {
728 		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
729 			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
730 		bio->bi_opf |= ff;
731 	}
732 	rq->rq_flags |= RQF_MIXED_MERGE;
733 }
734 
735 static void blk_account_io_merge_request(struct request *req)
736 {
737 	if (blk_do_io_stat(req)) {
738 		part_stat_lock();
739 		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
740 		part_stat_unlock();
741 	}
742 }
743 
744 static enum elv_merge blk_try_req_merge(struct request *req,
745 					struct request *next)
746 {
747 	if (blk_discard_mergable(req))
748 		return ELEVATOR_DISCARD_MERGE;
749 	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
750 		return ELEVATOR_BACK_MERGE;
751 
752 	return ELEVATOR_NO_MERGE;
753 }
754 
755 /*
756  * For non-mq, this has to be called with the request spinlock acquired.
757  * For mq with scheduling, the appropriate queue wide lock should be held.
758  */
759 static struct request *attempt_merge(struct request_queue *q,
760 				     struct request *req, struct request *next)
761 {
762 	if (!rq_mergeable(req) || !rq_mergeable(next))
763 		return NULL;
764 
765 	if (req_op(req) != req_op(next))
766 		return NULL;
767 
768 	if (rq_data_dir(req) != rq_data_dir(next))
769 		return NULL;
770 
771 	if (req->ioprio != next->ioprio)
772 		return NULL;
773 
774 	/*
775 	 * If we are allowed to merge, then append the bio list
776 	 * from 'next' to 'req' and release 'next'. ll_merge_requests_fn
777 	 * will have updated the segment counts; update the sector
778 	 * counts here. Handle DISCARDs separately, as they
779 	 * have separate settings.
780 	 */
781 
782 	switch (blk_try_req_merge(req, next)) {
783 	case ELEVATOR_DISCARD_MERGE:
784 		if (!req_attempt_discard_merge(q, req, next))
785 			return NULL;
786 		break;
787 	case ELEVATOR_BACK_MERGE:
788 		if (!ll_merge_requests_fn(q, req, next))
789 			return NULL;
790 		break;
791 	default:
792 		return NULL;
793 	}
794 
795 	/*
796 	 * If failfast settings disagree or any of the two is already
797 	 * a mixed merge, mark both as mixed before proceeding.  This
798 	 * makes sure that all involved bios have mixable attributes
799 	 * set properly.
800 	 */
801 	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
802 	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
803 	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
804 		blk_rq_set_mixed_merge(req);
805 		blk_rq_set_mixed_merge(next);
806 	}
807 
808 	/*
809 	 * At this point we have either done a back merge or front merge. We
810 	 * need the smaller start_time_ns of the merged requests to be the
811 	 * current request for accounting purposes.
812 	 */
813 	if (next->start_time_ns < req->start_time_ns)
814 		req->start_time_ns = next->start_time_ns;
815 
816 	req->biotail->bi_next = next->bio;
817 	req->biotail = next->biotail;
818 
819 	req->__data_len += blk_rq_bytes(next);
820 
821 	if (!blk_discard_mergable(req))
822 		elv_merge_requests(q, req, next);
823 
824 	/*
825 	 * 'next' is going away, so update stats accordingly
826 	 */
827 	blk_account_io_merge_request(next);
828 
829 	trace_block_rq_merge(next);
830 
831 	/*
832 	 * Ownership of the bios has passed from 'next' to 'req'; return 'next'
833 	 * for the caller to free
834 	 */
835 	next->bio = NULL;
836 	return next;
837 }
838 
839 static struct request *attempt_back_merge(struct request_queue *q,
840 		struct request *rq)
841 {
842 	struct request *next = elv_latter_request(q, rq);
843 
844 	if (next)
845 		return attempt_merge(q, rq, next);
846 
847 	return NULL;
848 }
849 
850 static struct request *attempt_front_merge(struct request_queue *q,
851 		struct request *rq)
852 {
853 	struct request *prev = elv_former_request(q, rq);
854 
855 	if (prev)
856 		return attempt_merge(q, prev, rq);
857 
858 	return NULL;
859 }
860 
861 /*
862  * Try to merge 'next' into 'rq'. Return true if the merge happened, false
863  * otherwise. The caller is responsible for freeing 'next' if the merge
864  * happened.
865  */
866 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
867 			   struct request *next)
868 {
869 	return attempt_merge(q, rq, next);
870 }
871 
872 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
873 {
874 	if (!rq_mergeable(rq) || !bio_mergeable(bio))
875 		return false;
876 
877 	if (req_op(rq) != bio_op(bio))
878 		return false;
879 
880 	/* different data direction or already started, don't merge */
881 	if (bio_data_dir(bio) != rq_data_dir(rq))
882 		return false;
883 
884 	/* don't merge across cgroup boundaries */
885 	if (!blk_cgroup_mergeable(rq, bio))
886 		return false;
887 
888 	/* only merge integrity protected bio into ditto rq */
889 	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
890 		return false;
891 
892 	/* Only merge if the crypt contexts are compatible */
893 	if (!bio_crypt_rq_ctx_compatible(rq, bio))
894 		return false;
895 
896 	if (rq->ioprio != bio_prio(bio))
897 		return false;
898 
899 	return true;
900 }
901 
902 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
903 {
904 	if (blk_discard_mergable(rq))
905 		return ELEVATOR_DISCARD_MERGE;
906 	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
907 		return ELEVATOR_BACK_MERGE;
908 	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
909 		return ELEVATOR_FRONT_MERGE;
910 	return ELEVATOR_NO_MERGE;
911 }
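
/*
 * Illustrative sketch (hypothetical types, user-space C): the sector
 * adjacency tests behind back and front merges.  A request covering
 * [pos, pos + sectors) back-merges a bio that starts right behind it and
 * front-merges one that ends right in front of it.
 */
#include <stdint.h>

enum ex_merge { EX_MERGE_NONE, EX_MERGE_BACK, EX_MERGE_FRONT };

static enum ex_merge ex_try_merge(uint64_t rq_pos, uint32_t rq_sectors,
				  uint64_t bio_pos, uint32_t bio_sectors)
{
	if (rq_pos + rq_sectors == bio_pos)
		return EX_MERGE_BACK;	/* bio appended after the request */
	if (rq_pos - bio_sectors == bio_pos)
		return EX_MERGE_FRONT;	/* bio prepended before the request */
	return EX_MERGE_NONE;
}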
912 
913 static void blk_account_io_merge_bio(struct request *req)
914 {
915 	if (!blk_do_io_stat(req))
916 		return;
917 
918 	part_stat_lock();
919 	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
920 	part_stat_unlock();
921 }
922 
923 enum bio_merge_status {
924 	BIO_MERGE_OK,
925 	BIO_MERGE_NONE,
926 	BIO_MERGE_FAILED,
927 };
928 
929 static enum bio_merge_status bio_attempt_back_merge(struct request *req,
930 		struct bio *bio, unsigned int nr_segs)
931 {
932 	const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;
933 
934 	if (!ll_back_merge_fn(req, bio, nr_segs))
935 		return BIO_MERGE_FAILED;
936 
937 	trace_block_bio_backmerge(bio);
938 	rq_qos_merge(req->q, req, bio);
939 
940 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
941 		blk_rq_set_mixed_merge(req);
942 
943 	req->biotail->bi_next = bio;
944 	req->biotail = bio;
945 	req->__data_len += bio->bi_iter.bi_size;
946 
947 	bio_crypt_free_ctx(bio);
948 
949 	blk_account_io_merge_bio(req);
950 	return BIO_MERGE_OK;
951 }
952 
953 static enum bio_merge_status bio_attempt_front_merge(struct request *req,
954 		struct bio *bio, unsigned int nr_segs)
955 {
956 	const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;
957 
958 	if (!ll_front_merge_fn(req, bio, nr_segs))
959 		return BIO_MERGE_FAILED;
960 
961 	trace_block_bio_frontmerge(bio);
962 	rq_qos_merge(req->q, req, bio);
963 
964 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
965 		blk_rq_set_mixed_merge(req);
966 
967 	bio->bi_next = req->bio;
968 	req->bio = bio;
969 
970 	req->__sector = bio->bi_iter.bi_sector;
971 	req->__data_len += bio->bi_iter.bi_size;
972 
973 	bio_crypt_do_front_merge(req, bio);
974 
975 	blk_account_io_merge_bio(req);
976 	return BIO_MERGE_OK;
977 }
978 
979 static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
980 		struct request *req, struct bio *bio)
981 {
982 	unsigned short segments = blk_rq_nr_discard_segments(req);
983 
984 	if (segments >= queue_max_discard_segments(q))
985 		goto no_merge;
986 	if (blk_rq_sectors(req) + bio_sectors(bio) >
987 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
988 		goto no_merge;
989 
990 	rq_qos_merge(q, req, bio);
991 
992 	req->biotail->bi_next = bio;
993 	req->biotail = bio;
994 	req->__data_len += bio->bi_iter.bi_size;
995 	req->nr_phys_segments = segments + 1;
996 
997 	blk_account_io_merge_bio(req);
998 	return BIO_MERGE_OK;
999 no_merge:
1000 	req_set_nomerge(q, req);
1001 	return BIO_MERGE_FAILED;
1002 }
1003 
1004 static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
1005 						   struct request *rq,
1006 						   struct bio *bio,
1007 						   unsigned int nr_segs,
1008 						   bool sched_allow_merge)
1009 {
1010 	if (!blk_rq_merge_ok(rq, bio))
1011 		return BIO_MERGE_NONE;
1012 
1013 	switch (blk_try_merge(rq, bio)) {
1014 	case ELEVATOR_BACK_MERGE:
1015 		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1016 			return bio_attempt_back_merge(rq, bio, nr_segs);
1017 		break;
1018 	case ELEVATOR_FRONT_MERGE:
1019 		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1020 			return bio_attempt_front_merge(rq, bio, nr_segs);
1021 		break;
1022 	case ELEVATOR_DISCARD_MERGE:
1023 		return bio_attempt_discard_merge(q, rq, bio);
1024 	default:
1025 		return BIO_MERGE_NONE;
1026 	}
1027 
1028 	return BIO_MERGE_FAILED;
1029 }
1030 
1031 /**
1032  * blk_attempt_plug_merge - try to merge with %current's plugged list
1033  * @q: request_queue new bio is being queued at
1034  * @bio: new bio being queued
1035  * @nr_segs: number of segments in @bio
1037  *
1038  * Determine whether @bio being queued on @q can be merged with the previous
1039  * request on %current's plugged list.  Returns %true if merge was successful,
1040  * otherwise %false.
1041  *
1042  * Plugging coalesces IOs from the same issuer for the same purpose without
1043  * going through @q->queue_lock.  As such it's more of an issuing mechanism
1044  * than scheduling, and the request, while it may have elvpriv data, is not
1045  * added to the elevator at this point.  In addition, we don't have
1046  * reliable access to the elevator outside queue lock.  Only check basic
1047  * merging parameters without querying the elevator.
1048  *
1049  * Caller must ensure !blk_queue_nomerges(q) beforehand.
1050  */
1051 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1052 		unsigned int nr_segs)
1053 {
1054 	struct blk_plug *plug;
1055 	struct request *rq;
1056 
1057 	plug = blk_mq_plug(bio);
1058 	if (!plug || rq_list_empty(plug->mq_list))
1059 		return false;
1060 
1061 	rq_list_for_each(&plug->mq_list, rq) {
1062 		if (rq->q == q) {
1063 			if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
1064 			    BIO_MERGE_OK)
1065 				return true;
1066 			break;
1067 		}
1068 
1069 		/*
1070 		 * Only keep iterating plug list for merges if we have multiple
1071 		 * queues
1072 		 */
1073 		if (!plug->multiple_queues)
1074 			break;
1075 	}
1076 	return false;
1077 }
1078 
1079 /*
1080  * Iterate list of requests and see if we can merge this bio with any
1081  * of them.
1082  */
1083 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
1084 			struct bio *bio, unsigned int nr_segs)
1085 {
1086 	struct request *rq;
1087 	int checked = 8;
1088 
1089 	list_for_each_entry_reverse(rq, list, queuelist) {
1090 		if (!checked--)
1091 			break;
1092 
1093 		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
1094 		case BIO_MERGE_NONE:
1095 			continue;
1096 		case BIO_MERGE_OK:
1097 			return true;
1098 		case BIO_MERGE_FAILED:
1099 			return false;
1100 		}
1101 
1102 	}
1103 
1104 	return false;
1105 }
1106 EXPORT_SYMBOL_GPL(blk_bio_list_merge);
1107 
1108 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
1109 		unsigned int nr_segs, struct request **merged_request)
1110 {
1111 	struct request *rq;
1112 
1113 	switch (elv_merge(q, &rq, bio)) {
1114 	case ELEVATOR_BACK_MERGE:
1115 		if (!blk_mq_sched_allow_merge(q, rq, bio))
1116 			return false;
1117 		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1118 			return false;
1119 		*merged_request = attempt_back_merge(q, rq);
1120 		if (!*merged_request)
1121 			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
1122 		return true;
1123 	case ELEVATOR_FRONT_MERGE:
1124 		if (!blk_mq_sched_allow_merge(q, rq, bio))
1125 			return false;
1126 		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1127 			return false;
1128 		*merged_request = attempt_front_merge(q, rq);
1129 		if (!*merged_request)
1130 			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
1131 		return true;
1132 	case ELEVATOR_DISCARD_MERGE:
1133 		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
1134 	default:
1135 		return false;
1136 	}
1137 }
1138 EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
1139