// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-throttle.h"

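/* Return the first multi-page bvec of @bio, as seen from its current iterator. */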
static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
}

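/*
 * Return the last multi-page bvec covered by @bio's current iterator.  If the
 * bio ends in the middle of a bvec, the returned length is truncated to the
 * part that actually belongs to this bio.
 */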
static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	bio_get_first_bvec(bio, bv);
	if (bv->bv_len == bio->bi_iter.bi_size)
		return;		/* this bio only has a single bvec */

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

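/*
 * Check whether appending @next after @prev would create a gap with respect
 * to the queue's virt boundary mask, i.e. whether the two bios must not be
 * merged into a single request.
 */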
static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
	 * is quite difficult to respect the sg gap limit.  We have to work
	 * hard to merge the huge number of small single bios submitted by
	 * workloads such as mkfs.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends in unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', and it has to be aligned to the logical
 * block size, which is the minimum unit accepted by hardware.
 */
static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
{
	return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
}

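/*
 * Split a discard or secure erase bio so that the front part fits within
 * max_discard_sectors and keeps the remainder aligned to the discard
 * granularity.  Returns the split-off front bio, or NULL if no split is
 * needed.
 */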
static struct bio *bio_split_discard(struct bio *bio,
		const struct queue_limits *lim,
		unsigned *nsegs, struct bio_set *bs)
{
	unsigned int max_discard_sectors, granularity;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(lim->discard_granularity >> 9, 1U);

	max_discard_sectors =
		min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	tmp = bio->bi_iter.bi_sector + split_sectors -
		((lim->discard_alignment >> 9) % granularity);
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

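/*
 * Split a write zeroes bio so that the front part fits within
 * max_write_zeroes_sectors.  Returns the split-off front bio, or NULL if no
 * split is needed.
 */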
static struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim,
		unsigned *nsegs, struct bio_set *bs)
{
	*nsegs = 0;
	if (!lim->max_write_zeroes_sectors)
		return NULL;
	if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
		return NULL;
	return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
}

/*
 * Return the maximum number of sectors from the start of a bio that may be
 * submitted as a single request to a block device. If enough sectors remain,
 * align the end to the physical block size. Otherwise align the end to the
 * logical block size. This approach minimizes the number of non-aligned
 * requests that are submitted to a block device if the start of a bio is not
 * aligned to a physical block boundary.
 */
static inline unsigned get_max_io_size(struct bio *bio,
		const struct queue_limits *lim)
{
	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
	unsigned max_sectors = lim->max_sectors, start, end;

	if (lim->chunk_sectors) {
		max_sectors = min(max_sectors,
			blk_chunk_sectors_left(bio->bi_iter.bi_sector,
					lim->chunk_sectors));
	}

	start = bio->bi_iter.bi_sector & (pbs - 1);
	end = (start + max_sectors) & ~(pbs - 1);
	if (end > start)
		return end - start;
	return max_sectors & ~(lbs - 1);
}

/**
 * get_max_segment_size() - maximum number of bytes to add as a single segment
 * @lim: Request queue limits.
 * @start_page: See below.
 * @offset: Offset from @start_page where to add a segment.
 *
 * Returns the maximum number of bytes that can be added as a single segment.
 */
static inline unsigned get_max_segment_size(const struct queue_limits *lim,
		struct page *start_page, unsigned long offset)
{
	unsigned long mask = lim->seg_boundary_mask;

	offset = mask & (page_to_phys(start_page) + offset);

	/*
	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
	 * after having calculated the minimum.
	 */
	return min(mask - offset, (unsigned long)lim->max_segment_size - 1) + 1;
}

/**
 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 * @lim:       [in] queue limits to split based on
 * @bv:        [in] bvec to examine
 * @nsegs:     [in,out] Number of segments in the bio being built. Incremented
 *             by the number of segments from @bv that may be appended to that
 *             bio without exceeding @max_segs
 * @bytes:     [in,out] Number of bytes in the bio being built. Incremented
 *             by the number of bytes from @bv that may be appended to that
 *             bio without exceeding @max_bytes
 * @max_segs:  [in] upper bound for *@nsegs
 * @max_bytes: [in] upper bound for *@bytes
 *
 * When splitting a bio, it can happen that a bvec is encountered that is too
 * big to fit in a single segment and hence that it has to be split in the
 * middle. This function verifies whether or not that should happen. The value
 * %true is returned if and only if appending the entire @bv to a bio with
 * *@nsegs segments and *@bytes bytes would make that bio unacceptable for
 * the block driver.
 */
static bool bvec_split_segs(const struct queue_limits *lim,
		const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
		unsigned max_segs, unsigned max_bytes)
{
	unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
	unsigned len = min(bv->bv_len, max_len);
	unsigned total_len = 0;
	unsigned seg_size = 0;

	while (len && *nsegs < max_segs) {
		seg_size = get_max_segment_size(lim, bv->bv_page,
						bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		(*nsegs)++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
			break;
	}

	*bytes += total_len;

	/* tell the caller to split the bvec if it is too big to fit */
	return len > 0 || bv->bv_len > max_len;
}

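/*
 * Return the granularity a split has to be aligned to: writes must honour the
 * zone write granularity when one is set, everything else only needs to be
 * aligned to the logical block size.
 */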
static unsigned int bio_split_alignment(struct bio *bio,
		const struct queue_limits *lim)
{
	if (op_is_write(bio_op(bio)) && lim->zone_write_granularity)
		return lim->zone_write_granularity;
	return lim->logical_block_size;
}

/**
 * bio_split_rw - split a bio in two bios
 * @bio:  [in] bio to be split
 * @lim:  [in] queue limits to split based on
 * @segs: [out] number of segments in the bio with the first half of the sectors
 * @bs:	  [in] bio set to allocate the clone from
 * @max_bytes: [in] maximum number of bytes per bio
 *
 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 * following is guaranteed for the cloned bio:
 * - That it has at most @max_bytes worth of data
 * - That it has at most @lim->max_segments segments.
 *
 * Except for discard requests the cloned bio will point at the bi_io_vec of
 * the original bio. It is the responsibility of the caller to ensure that the
 * original bio is not freed before the cloned bio. The caller is also
 * responsible for ensuring that @bs is only destroyed after processing of the
 * split bio has finished.
 */
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *segs, struct bio_set *bs, unsigned max_bytes)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, bytes = 0;

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
			goto split;

		if (nsegs < lim->max_segments &&
		    bytes + bv.bv_len <= max_bytes &&
		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			bytes += bv.bv_len;
		} else {
			if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
					lim->max_segments, max_bytes))
				goto split;
		}

		bvprv = bv;
		bvprvp = &bvprv;
	}

	*segs = nsegs;
	return NULL;
split:
	/*
	 * We can't sanely support splitting for a REQ_NOWAIT bio. End it
	 * with EAGAIN if splitting is required and return an error pointer.
	 */
	if (bio->bi_opf & REQ_NOWAIT) {
		bio->bi_status = BLK_STS_AGAIN;
		bio_endio(bio);
		return ERR_PTR(-EAGAIN);
	}

	*segs = nsegs;

	/*
	 * Individual bvecs might not be logical block aligned. Round down the
	 * split size so that each bio is properly block size aligned, even if
	 * we do not use the full hardware limits.
	 */
	bytes = ALIGN_DOWN(bytes, bio_split_alignment(bio, lim));

	/*
	 * Bio splitting may cause subtle trouble such as hangs when doing sync
	 * iopoll in the direct I/O path.  Given that the performance gain of
	 * iopoll for big I/O can be trivial, disable iopoll when a split is
	 * needed.
	 */
	bio_clear_polled(bio);
	return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
}
EXPORT_SYMBOL_GPL(bio_split_rw);

/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio:     bio to be split
 * @lim:     queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it. @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from the disk's bio_split bio set, which is
 * provided by the block layer.
 */
struct bio *__bio_split_to_limits(struct bio *bio,
		const struct queue_limits *lim,
		unsigned int *nr_segs)
{
	struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
	struct bio *split;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = bio_split_discard(bio, lim, nr_segs, bs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
		break;
	default:
		split = bio_split_rw(bio, lim, nr_segs, bs,
				get_max_io_size(bio, lim) << SECTOR_SHIFT);
		if (IS_ERR(split))
			return NULL;
		break;
	}

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		blkcg_bio_issue_init(split);
		bio_chain(split, bio);
		trace_block_split(split, bio->bi_iter.bi_sector);
		submit_bio_noacct(bio);
		return split;
	}
	return bio;
}

/**
 * bio_split_to_limits - split a bio to fit the queue limits
 * @bio: bio to be split
 *
 * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
 * if so split off a bio fitting the limits from the beginning of @bio and
 * return it. @bio is shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from the disk's bio_split bio set, which is
 * provided by the block layer.
 */
struct bio *bio_split_to_limits(struct bio *bio)
{
	const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
	unsigned int nr_segs;

	if (bio_may_exceed_limits(bio, lim))
		return __bio_split_to_limits(bio, lim, &nr_segs);
	return bio;
}
EXPORT_SYMBOL(bio_split_to_limits);

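/*
 * Recalculate the number of physical segments in @rq by walking its bios and
 * applying the queue limits.
 */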
unsigned int blk_recalc_rq_segments(struct request *rq)
{
	unsigned int nr_phys_segs = 0;
	unsigned int bytes = 0;
	struct req_iterator iter;
	struct bio_vec bv;

	if (!rq->bio)
		return 0;

	switch (bio_op(rq->bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		if (queue_max_discard_segments(rq->q) > 1) {
			struct bio *bio = rq->bio;

			for_each_bio(bio)
				nr_phys_segs++;
			return nr_phys_segs;
		}
		return 1;
	case REQ_OP_WRITE_ZEROES:
		return 0;
	default:
		break;
	}

	rq_for_each_bvec(bv, rq, iter)
		bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
				UINT_MAX, UINT_MAX);
	return nr_phys_segs;
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping. We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}

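/*
 * Map a single multi-page bvec to one or more scatterlist entries, honouring
 * the segment size and segment boundary limits of the queue.  Returns the
 * number of scatterlist entries used.
 */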
static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = min(get_max_segment_size(&q->limits,
					bvec->bv_page, offset), nbytes);
		struct page *page = bvec->bv_page;

		/*
		 * Unfortunately a fair number of drivers barf on scatterlists
		 * that have an offset larger than PAGE_SIZE, despite other
		 * subsystems dealing with that invariant just fine. For now
		 * stick to the legacy format where we never present those from
		 * the block layer, but the code below should be removed once
		 * these offenders (mostly MMC/SD drivers) are fixed.
		 */
		page += (offset >> PAGE_SHIFT);
		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
		nsegs++;
	}

	return nsegs;
}

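/* Map one bvec to a single scatterlist entry. */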
static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
		struct bio_vec *bvprv, struct scatterlist **sg)
{
	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

	if ((*sg)->length + nbytes > queue_max_segment_size(q))
		return false;

	if (!biovec_phys_mergeable(q, bvprv, bvec))
		return false;

	(*sg)->length += nbytes;

	return true;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
		struct scatterlist *sglist,
		struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

	for_each_bio(bio) {
		bio_for_each_bvec(bvec, bio, iter) {
			/*
			 * Only try to merge bvecs from two bios given we
			 * have done bio internal merge when adding pages
			 * to bio
			 */
			if (new_bio &&
			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
				goto next_bvec;

			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
			else
				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
			new_bio = false;
		}
		if (likely(bio->bi_iter.bi_size)) {
			bvprv = bvec;
			new_bio = true;
		}
	}

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg)
{
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

	if (*last_sg)
		sg_mark_end(*last_sg);

	/*
	 * Something must have gone wrong if the computed number of segments
	 * is bigger than the number of the request's physical segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);

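/*
 * Return the maximum number of sectors a request starting at @offset may
 * contain.  The chunk_sectors limit (e.g. zone boundaries) is honoured for
 * everything except passthrough, discard and secure erase requests.
 */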
static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;
	unsigned int max_sectors;

	if (blk_rq_is_passthrough(rq))
		return q->limits.max_hw_sectors;

	max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return max_sectors;
	return min(max_sectors,
		   blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
}

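/*
 * Check whether @bio's segments may be added to @req without violating the
 * cgroup, integrity or segment count limits.  Returns 1 on success (bumping
 * the segment count for non-discard requests), otherwise marks the request
 * as no-merge and returns 0.
 */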
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
		unsigned int nr_phys_segs)
{
	if (!blk_cgroup_mergeable(req, bio))
		goto no_merge;

	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/* discard request merge won't add new segment */
	if (req_op(req) == REQ_OP_DISCARD)
		return 1;

	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump the segment
	 * counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}

int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_back_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static int ll_front_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_front_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > blk_rq_get_max_segments(req))
		return 0;

	if (!blk_cgroup_mergeable(req, next->bio))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	if (!bio_crypt_ctx_merge_rq(req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

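/*
 * Return the failfast flags to use for @bio when checking whether a merge has
 * mixed failfast attributes; readahead bios count as failfast for all fault
 * types.
 */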
static inline blk_opf_t bio_failfast(const struct bio *bio)
{
	if (bio->bi_opf & REQ_RAHEAD)
		return REQ_FAILFAST_MASK;

	return bio->bi_opf & REQ_FAILFAST_MASK;
}

/*
 * After we are marked as MIXED_MERGE, any new RA bio has to be updated
 * as failfast, and the request's failfast flags have to be updated in
 * case of a front merge.
 */
static inline void blk_update_mixed_merge(struct request *req,
		struct bio *bio, bool front_merge)
{
	if (req->rq_flags & RQF_MIXED_MERGE) {
		if (bio->bi_opf & REQ_RAHEAD)
			bio->bi_opf |= REQ_FAILFAST_MASK;

		if (front_merge) {
			req->cmd_flags &= ~REQ_FAILFAST_MASK;
			req->cmd_flags |= bio->bi_opf & REQ_FAILFAST_MASK;
		}
	}
}

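/*
 * Account for @req disappearing into another request as the result of a
 * request-request merge.
 */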
static void blk_account_io_merge_request(struct request *req)
{
	if (blk_do_io_stat(req)) {
		part_stat_lock();
		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
		part_stat_local_dec(req->part,
				    in_flight[op_is_write(req_op(req))]);
		part_stat_unlock();
	}
}

static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
		struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next))
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	blk_crypto_rq_put_keyslot(next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge_request(next);

	trace_block_rq_merge(next);

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}

static struct request *attempt_back_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

static struct request *attempt_front_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

/*
 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
 * otherwise. The caller is responsible for freeing 'next' if the merge
 * happened.
 */
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			   struct request *next)
{
	return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* don't merge across cgroup boundaries */
	if (!blk_cgroup_mergeable(rq, bio))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* Only merge if the crypt contexts are compatible */
	if (!bio_crypt_rq_ctx_compatible(rq, bio))
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	return true;
}

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}

static void blk_account_io_merge_bio(struct request *req)
{
	if (!blk_do_io_stat(req))
		return;

	part_stat_lock();
	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
	part_stat_unlock();
}

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

static enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const blk_opf_t ff = bio_failfast(bio);

	if (!ll_back_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_backmerge(bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	blk_update_mixed_merge(req, bio, false);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_free_ctx(bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_front_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const blk_opf_t ff = bio_failfast(bio);

	if (!ll_front_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_frontmerge(bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	blk_update_mixed_merge(req, bio, true);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_do_front_merge(req, bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
		struct request *req, struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	rq_qos_merge(q, req, bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->nr_phys_segments = segments + 1;

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
no_merge:
	req_set_nomerge(q, req);
	return BIO_MERGE_FAILED;
}

static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
						   struct request *rq,
						   struct bio *bio,
						   unsigned int nr_segs,
						   bool sched_allow_merge)
{
	if (!blk_rq_merge_ok(rq, bio))
		return BIO_MERGE_NONE;

	switch (blk_try_merge(rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_back_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_FRONT_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_front_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return BIO_MERGE_NONE;
	}

	return BIO_MERGE_FAILED;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @nr_segs: number of segments in @bio
 *
 * Determine whether @bio being queued on @q can be merged with the previous
 * request on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added to the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct blk_plug *plug;
	struct request *rq;

	plug = blk_mq_plug(bio);
	if (!plug || rq_list_empty(plug->mq_list))
		return false;

	rq_list_for_each(&plug->mq_list, rq) {
		if (rq->q == q) {
			if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
			    BIO_MERGE_OK)
				return true;
			break;
		}

		/*
		 * Only keep iterating plug list for merges if we have multiple
		 * queues
		 */
		if (!plug->multiple_queues)
			break;
	}
	return false;
}

/*
 * Iterate list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		if (!checked--)
			break;

		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
		case BIO_MERGE_NONE:
			continue;
		case BIO_MERGE_OK:
			return true;
		case BIO_MERGE_FAILED:
			return false;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_bio_list_merge);

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);