/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same.  */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
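	/*
	 * Worked example with hypothetical numbers: bi_sector = 12,
	 * split_sectors = 100, granularity = 8, alignment = 4.  Then
	 * tmp = 12 + 100 - 4 = 108, and sector_div() below leaves
	 * tmp = 108 % 8 = 4, so split_sectors becomes 96 and the split
	 * ends at sector 12 + 96 = 108, whose offset from the alignment
	 * (108 - 4 = 104) is a multiple of the granularity.
	 */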
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

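/*
 * Split a regular read/write bio so that the front part fits the queue
 * limits: max_sectors (via blk_max_size_offset()), max_segments,
 * max_segment_size and the SG gap constraint.  Returns the split-off
 * front part, or NULL if no split was needed.
 */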
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;
	unsigned front_seg_size = bio->bi_seg_front_size;
	bool do_split = true;
	struct bio *new = NULL;

	bio_for_each_segment(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) >
				blk_max_size_offset(q, bio->bi_iter.bi_sector)) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < queue_max_segments(q) &&
			    sectors < blk_max_size_offset(q,
						bio->bi_iter.bi_sector)) {
				nsegs++;
				sectors = blk_max_size_offset(q,
						bio->bi_iter.bi_sector);
			}
			goto split;
		}

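		/*
		 * Try to fold this biovec into the previous physical
		 * segment: allowed only when clustering is enabled, the
		 * combined size stays within the max segment size, the
		 * two biovecs are physically contiguous, and the merged
		 * segment does not cross the segment boundary mask.
		 */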
		if (bvprvp && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			bvprvp = &bvprv;
			sectors += bv.bv_len >> 9;

			if (nsegs == 1 && seg_size > front_seg_size)
				front_seg_size = seg_size;
			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		nsegs++;
		bvprv = bv;
		bvprvp = &bvprv;
		seg_size = bv.bv_len;
		sectors += bv.bv_len >> 9;

		if (nsegs == 1 && seg_size > front_seg_size)
			front_seg_size = seg_size;
	}

	do_split = false;
split:
	*segs = nsegs;

	if (do_split) {
		new = bio_split(bio, sectors, GFP_NOIO, bs);
		if (new)
			bio = new;
	}

	bio->bi_seg_front_size = front_seg_size;
	if (seg_size > bio->bi_seg_back_size)
		bio->bi_seg_back_size = seg_size;

	return do_split ? new : NULL;
}

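/**
 * blk_queue_split - split a bio so it obeys the queue limits
 * @q:   the request queue the bio is being submitted to
 * @bio: in/out: bio to check; on return, the part to process next
 * @bs:  bio_set used to allocate discard/write-same splits
 *
 * If *@bio is too big, split off a front part that fits the limits,
 * resubmit the remainder via generic_make_request(), and return the
 * front part in *@bio.  The physical segment count is stored and
 * BIO_SEG_VALID set as a side effect.
 */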
void blk_queue_split(struct request_queue *q, struct bio **bio,
		     struct bio_set *bs)
{
	struct bio *split, *res;
	unsigned nsegs;

	if ((*bio)->bi_rw & REQ_DISCARD)
		split = blk_bio_discard_split(q, *bio, bs, &nsegs);
	else if ((*bio)->bi_rw & REQ_WRITE_SAME)
		split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
	else
		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);

	/* physical segments can be figured out during splitting */
	res = split ? split : *bio;
	res->bi_phys_segments = nsegs;
	bio_set_flag(res, BIO_SEG_VALID);

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_rw |= REQ_NOMERGE;

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);

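/*
 * Walk a (possibly chained) bio and count the physical segments it
 * would map to, honouring the queue's clustering rules.  The front and
 * back segment sizes are recorded on the first and last bio so that
 * later request merging can tell whether adjacent requests would share
 * a segment.
 */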
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	/*
	 * This should probably be returning 0, but blk_add_request_payload()
	 * still expects discards to carry one payload segment (Christoph!!!!)
	 */
	if (bio->bi_rw & REQ_DISCARD)
		return 1;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return 1;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
			(seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

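/*
 * Check whether the last segment of @bio and the first segment of @nxt
 * could be folded into a single physical segment if the two bios were
 * merged back to back.
 */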
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;
	struct bvec_iter iter;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_for_each_segment(end_bv, bio, iter)
		if (end_bv.bv_len == iter.bi_size)
			break;

	nxt_bv = bio_iovec(nxt);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}

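/*
 * Map one biovec into the scatterlist: either grow the current sg entry
 * when the clustering rules allow it, or start a new entry.  *sg points
 * at the entry being built and *nsegs counts the entries used so far.
 */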
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	if (bio->bi_rw & REQ_DISCARD) {
		/*
		 * This is a hack - drivers should be neither modifying the
		 * biovec, nor relying on bi_vcnt - but because of
		 * blk_add_request_payload(), a discard bio may or may not have
		 * a payload we need to set up here (thank you Christoph) and
		 * bi_vcnt is really the only way of telling if we need to.
		 */

		if (bio->bi_vcnt)
			goto single_segment;

		return 0;
	}

	if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
		*sg = sglist;
		bvec = bio_iovec(bio);
		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
		return 1;
	}

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * Map a request to a scatterlist, and return the number of sg entries
 * set up.  The caller must make sure that sg can hold
 * rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the computed number of
	 * segments is bigger than the number of the request's physical
	 * segments.
	 */
	WARN_ON(nsegs > rq->nr_phys_segments);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
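
/*
 * Typical driver usage is a sketch along these lines, assuming a
 * driver-private table sized for queue_max_segments() entries
 * (MY_MAX_SEGMENTS is a hypothetical driver constant):
 *
 *	struct scatterlist table[MY_MAX_SEGMENTS];
 *	int count;
 *
 *	sg_init_table(table, MY_MAX_SEGMENTS);
 *	count = blk_rq_map_sg(q, rq, table);
 *	// table[0..count-1] now describes the request's data and can
 *	// be handed to dma_map_sg().
 */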

static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump the
	 * physical segment counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload;
 * it does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued
	 * request.  We can't merge them if so.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

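/*
 * @req is being merged into another request and is about to be freed;
 * close out its I/O accounting by updating the partition stats and
 * dropping the in-flight count it contributed.
 */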
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			  struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append the bio list from
	 * next to rq and release next.  ll_merge_requests_fn() will
	 * have updated the segment counts; update the sector counts
	 * here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or either of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of the bios has passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

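/*
 * Position-independent checks for whether @bio may be merged into @rq
 * at all.  Whether it would be a front or a back merge is decided
 * separately by blk_try_merge().
 */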
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge an integrity-protected bio into a likewise protected rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}

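/*
 * Decide which way @bio would merge with @rq.  A back merge means the
 * bio starts exactly where the request ends; a front merge means the
 * bio ends exactly where the request starts.  E.g. a request covering
 * sectors 8..15 back-merges a bio starting at sector 16, and
 * front-merges an 8-sector bio starting at sector 0.
 */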
int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
777