// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

/*
 * Check if the two bvecs from two bios can be merged to one segment. If yes,
 * no need to check gap between the two bios since the 1st bio and the 1st bvec
 * in the 2nd bio can be handled in one segment.
 */
static inline bool bios_segs_mergeable(struct request_queue *q,
		struct bio *prev, struct bio_vec *prev_last_bv,
		struct bio_vec *next_first_bv)
{
	if (!biovec_phys_mergeable(q, prev_last_bv, next_first_bv))
		return false;
	if (prev->bi_seg_back_size + next_first_bv->bv_len >
			queue_max_segment_size(q))
		return false;
	return true;
}

static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
	 * is quite difficult to respect the sg gap limit. We work hard to
	 * merge a huge number of small single bios in case of mkfs.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends in unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (bios_segs_mergeable(q, prev, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}

static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors,
			bio_allowed_max_sectors(q));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}
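/*
 * Worked example of the alignment math above (illustrative numbers only,
 * not taken from any particular device): assume discard_granularity is
 * 4096 bytes and discard_alignment is 0, so granularity = 8 sectors and
 * alignment = 0.  For a discard bio starting at sector 10, with
 * max_discard_sectors already trimmed down to 2048:
 *
 *	tmp = 10 + 2048 - 0 = 2058
 *	sector_div(tmp, 8) returns the remainder 2, so
 *	split_sectors = 2048 - 2 = 2046
 *
 * and the remainder bio then starts at sector 10 + 2046 = 2056, which is
 * a multiple of the 8-sector granularity.
 */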
static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned mask = queue_logical_block_size(q) - 1;

	/* aligned to logical block size */
	sectors &= ~(mask >> 9);

	return sectors;
}

static unsigned get_max_segment_size(struct request_queue *q,
				     unsigned offset)
{
	unsigned long mask = queue_segment_boundary(q);

	/* default segment boundary mask means no boundary limit */
	if (mask == BLK_SEG_BOUNDARY_MASK)
		return queue_max_segment_size(q);

	return min_t(unsigned long, mask - (mask & offset) + 1,
		     queue_max_segment_size(q));
}

/*
 * Split the bvec @bv into segments, and update all kinds of
 * variables.
 */
static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
		unsigned *nsegs, unsigned *last_seg_size,
		unsigned *front_seg_size, unsigned *sectors, unsigned max_segs)
{
	unsigned len = bv->bv_len;
	unsigned total_len = 0;
	unsigned new_nsegs = 0, seg_size = 0;

	/*
	 * A multi-page bvec may be too big to hold in one segment, so the
	 * current bvec has to be split into multiple segments.
	 */
	while (len && new_nsegs + *nsegs < max_segs) {
		seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		new_nsegs++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
			break;
	}

	if (!new_nsegs)
		return !!len;

	/* update front segment size */
	if (!*nsegs) {
		unsigned first_seg_size;

		if (new_nsegs == 1)
			first_seg_size = get_max_segment_size(q, bv->bv_offset);
		else
			first_seg_size = queue_max_segment_size(q);

		if (*front_seg_size < first_seg_size)
			*front_seg_size = first_seg_size;
	}

	/* update other variables */
	*last_seg_size = seg_size;
	*nsegs += new_nsegs;
	if (sectors)
		*sectors += total_len >> 9;

	/* split in the middle of the bvec if len != 0 */
	return !!len;
}
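/*
 * Illustration of the boundary math in get_max_segment_size() (hypothetical
 * limits, not from a real device): with a 64KB segment boundary the mask is
 * 0xffff.  For a segment that would start at offset 0x3000 within the
 * current boundary window:
 *
 *	mask - (mask & 0x3000) + 1 = 0xffff - 0x3000 + 1 = 0xd000
 *
 * i.e. at most 52KB may be added before the segment would straddle the
 * boundary, further capped by queue_max_segment_size().  bvec_split_segs()
 * above applies this repeatedly, so a large multi-page bvec is carved into
 * several hardware segments that each respect the boundary.
 */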
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;
	unsigned front_seg_size = bio->bi_seg_front_size;
	bool do_split = true;
	struct bio *new = NULL;
	const unsigned max_sectors = get_max_io_size(q, bio);
	const unsigned max_segs = queue_max_segments(q);

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) > max_sectors) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < max_segs &&
			    sectors < max_sectors) {
				/* split in the middle of bvec */
				bv.bv_len = (max_sectors - sectors) << 9;
				bvec_split_segs(q, &bv, &nsegs,
						&seg_size,
						&front_seg_size,
						&sectors, max_segs);
			}
			goto split;
		}

		if (bvprvp) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!biovec_phys_mergeable(q, bvprvp, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			bvprvp = &bvprv;
			sectors += bv.bv_len >> 9;

			if (nsegs == 1 && seg_size > front_seg_size)
				front_seg_size = seg_size;

			continue;
		}
new_segment:
		if (nsegs == max_segs)
			goto split;

		bvprv = bv;
		bvprvp = &bvprv;

		if (bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			seg_size = bv.bv_len;
			sectors += bv.bv_len >> 9;
			if (nsegs == 1 && seg_size > front_seg_size)
				front_seg_size = seg_size;
		} else if (bvec_split_segs(q, &bv, &nsegs, &seg_size,
					   &front_seg_size, &sectors,
					   max_segs)) {
			goto split;
		}
	}

	do_split = false;
split:
	*segs = nsegs;

	if (do_split) {
		new = bio_split(bio, sectors, GFP_NOIO, bs);
		if (new)
			bio = new;
	}

	bio->bi_seg_front_size = front_seg_size;
	if (seg_size > bio->bi_seg_back_size)
		bio->bi_seg_back_size = seg_size;

	return do_split ? new : NULL;
}
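/*
 * Illustrative (hypothetical) walk through blk_bio_segment_split(): with
 * max_sectors = 256 (128KB) and a 192KB bio, the loop above accumulates
 * bvecs until adding the next one would exceed 256 sectors, splits that
 * bvec in the middle so that exactly 128KB is kept, and bio_split() returns
 * the leading 128KB while the original bio is left holding the trailing
 * 64KB for blk_queue_split() below to resubmit.
 */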
void blk_queue_split(struct request_queue *q, struct bio **bio)
{
	struct bio *split, *res;
	unsigned nsegs;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, &q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, &q->bio_split, &nsegs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, &q->bio_split, &nsegs);
		break;
	}

	/* physical segments can be figured out during splitting */
	res = split ? split : *bio;
	res->bi_phys_segments = nsegs;
	bio_set_flag(res, BIO_SEG_VALID);

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		/*
		 * Since we're recursing into make_request here, ensure
		 * that we mark this bio as already having entered the queue.
		 * If not, and the queue is going away, we can get stuck
		 * forever on waiting for the queue reference to drop. But
		 * that will never happen, as we're already holding a
		 * reference to it.
		 */
		bio_set_flag(*bio, BIO_QUEUE_ENTERED);

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);

static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	struct bio_vec bv, bvprv = { NULL };
	int prev = 0;
	unsigned int seg_size, nr_phys_segs;
	unsigned front_seg_size;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	front_seg_size = bio->bi_seg_front_size;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	fbio = bio;
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_bvec(bv, bio, iter) {
			if (prev) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!biovec_phys_mergeable(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;

				if (nr_phys_segs == 1 && seg_size >
						front_seg_size)
					front_seg_size = seg_size;

				continue;
			}
new_segment:
			bvprv = bv;
			prev = 1;
			bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
					&front_seg_size, NULL, UINT_MAX);
		}
		bbio = bio;
	}

	fbio->bi_seg_front_size = front_seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;

	bio_set_flag(bio, BIO_SEG_VALID);
}
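/*
 * Rough illustration of the recount above (hypothetical layout): a bio
 * carrying three 4KB bvecs that happen to be physically contiguous is
 * counted as a single physical segment, provided the queue's
 * max_segment_size is at least 12KB and no virt/segment boundary is
 * crossed; if the second bvec is not contiguous with the first,
 * biovec_phys_mergeable() fails and the segment count rises accordingly.
 */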
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_get_last_bvec(bio, &end_bv);
	bio_get_first_bvec(nxt, &nxt_bv);

	return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping. We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}

static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0, offset = 0;

	while (nbytes > 0) {
		unsigned seg_size;
		struct page *pg;
		unsigned idx;

		*sg = blk_next_sg(sg, sglist);

		seg_size = get_max_segment_size(q, bvec->bv_offset + total);
		seg_size = min(nbytes, seg_size);

		offset = (total + bvec->bv_offset) % PAGE_SIZE;
		idx = (total + bvec->bv_offset) / PAGE_SIZE;
		pg = bvec_nth_page(bvec->bv_page, idx);

		sg_set_page(*sg, pg, seg_size, offset);

		total += seg_size;
		nbytes -= seg_size;
		nsegs++;
	}

	return nsegs;
}

static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs)
{
	int nbytes = bvec->bv_len;

	if (*sg) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;
		if (!biovec_phys_mergeable(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (bvec->bv_offset + bvec->bv_len <= PAGE_SIZE) {
			*sg = blk_next_sg(sg, sglist);
			sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
			(*nsegs) += 1;
		} else
			(*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg);
	}
	*bvprv = *bvec;
}

static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = sglist;
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;

	for_each_bio(bio)
		bio_for_each_bvec(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs);

	return nsegs;
}
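/*
 * Illustration of the mapping above (hypothetical sizes): a single 16KB
 * multi-page bvec on a queue whose max_segment_size is 8KB is emitted by
 * blk_bvec_map_sg() as two scatterlist entries of 8KB each.  Conversely,
 * on a queue with a larger segment size limit and no boundary in the way,
 * three separate but physically contiguous 4KB bvecs are folded by
 * __blk_segment_map_sg() into one 12KB scatterlist entry.
 */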
/*
 * Map a request to a scatterlist, returning the number of sg entries set up.
 * The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (op_is_write(req_op(rq)))
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the computed number of
	 * segments is bigger than the request's number of physical segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
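/*
 * The padding arithmetic above, with made-up numbers: if dma_pad_mask is 3
 * (the device wants transfers padded to 4 bytes) and blk_rq_bytes(rq) is
 * 518, then
 *
 *	pad_len = (3 & ~518) + 1 = 1 + 1 = 2
 *
 * so the final sg entry is lengthened by 2 bytes and the padded total,
 * 520 bytes, is 4-byte aligned.
 */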
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment. Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(q, req);
	return 0;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}
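/*
 * Hypothetical example of the accounting above: merging a 3-segment request
 * with a 2-segment request normally yields 5 physical segments, but if the
 * last bvec of the first request and the first bvec of the second happen to
 * be physically contiguous (and their combined size still fits in one
 * segment), blk_phys_contig_segment() lets the two boundary segments be
 * counted as one, so the merged request ends up with 4.
 */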
/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios. It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;

		part_stat_lock();
		part = req->part;

		part_dec_in_flight(req->q, part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Two cases of handling DISCARD merge:
 * If max_discard_segments > 1, the driver treats every bio
 * as a range and sends them to the controller together. The ranges
 * need not be contiguous.
 * Otherwise, the bios/requests will be handled the same as
 * others, which must be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}
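/*
 * For instance (device limits invented for illustration): on a queue
 * reporting max_discard_segments = 8, discard requests covering sectors
 * 0-7 and 1024-1031 can still be merged into one request carrying two
 * ranges, and blk_try_req_merge() reports ELEVATOR_DISCARD_MERGE without
 * requiring the ranges to touch.  With max_discard_segments = 1 the same
 * pair only merges if the second range starts exactly where the first
 * ends, i.e. under the normal ELEVATOR_BACK_MERGE rule.
 */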
/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk)
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding. This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}

struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct request *free;

	free = attempt_merge(q, rq, next);
	if (free) {
		blk_put_request(free);
		return 1;
	}

	return 0;
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device */
	if (rq->rq_disk != bio->bi_disk)
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	return true;
}

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
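/*
 * A quick positional example for blk_try_merge() (sector numbers made up):
 * for a request covering sectors 100-107 (blk_rq_pos() == 100,
 * blk_rq_sectors() == 8), a bio starting at sector 108 satisfies
 * 100 + 8 == 108 and is a back merge candidate, while an 8-sector bio
 * starting at sector 92 satisfies 100 - 8 == 92 and is a front merge
 * candidate; anything else is ELEVATOR_NO_MERGE.
 */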