/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned mask = queue_logical_block_size(q) - 1;

	/* aligned to logical block size */
	sectors &= ~(mask >> 9);

	return sectors;
}
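/*
 * Split a read/write bio so that it fits the queue's limits: at most
 * BIO_MAX_PAGES bvecs, get_max_io_size() sectors, queue_max_segments()
 * segments and queue_max_segment_size() bytes per segment.  Returns a
 * bio holding the leading part that fits, leaving the remainder in
 * @bio, or NULL if no split was needed.
 */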
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;
	unsigned front_seg_size = bio->bi_seg_front_size;
	bool do_split = true;
	struct bio *new = NULL;
	const unsigned max_sectors = get_max_io_size(q, bio);
	unsigned bvecs = 0;

	bio_for_each_segment(bv, bio, iter) {
		/*
		 * With arbitrary bio size, the incoming bio may be very
		 * big. We have to split the bio into small bios so that
		 * each holds at most BIO_MAX_PAGES bvecs, because
		 * bio_clone() can fail to allocate big bvecs.
		 *
		 * It would be better to apply the limit per request
		 * queue in which bio_clone() is involved, instead of
		 * globally. The biggest blocker is the bio_clone() in
		 * bio bounce.
		 *
		 * If a bio is split for this reason, we should be
		 * allowed to continue merging the resulting bios, but
		 * we don't do that for now, to keep the change simple.
		 *
		 * TODO: deal with bio bounce's bio_clone() gracefully
		 * and convert the global limit into a per-queue limit.
		 */
		if (bvecs++ >= BIO_MAX_PAGES)
			goto split;

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) > max_sectors) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < queue_max_segments(q) &&
			    sectors < max_sectors) {
				nsegs++;
				sectors = max_sectors;
			}
			if (sectors)
				goto split;
			/* Make this single bvec the first segment. */
		}

		if (bvprvp && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			bvprvp = &bvprv;
			sectors += bv.bv_len >> 9;

			if (nsegs == 1 && seg_size > front_seg_size)
				front_seg_size = seg_size;
			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		nsegs++;
		bvprv = bv;
		bvprvp = &bvprv;
		seg_size = bv.bv_len;
		sectors += bv.bv_len >> 9;

		if (nsegs == 1 && seg_size > front_seg_size)
			front_seg_size = seg_size;
	}

	do_split = false;
split:
	*segs = nsegs;

	if (do_split) {
		new = bio_split(bio, sectors, GFP_NOIO, bs);
		if (new)
			bio = new;
	}

	bio->bi_seg_front_size = front_seg_size;
	if (seg_size > bio->bi_seg_back_size)
		bio->bi_seg_back_size = seg_size;

	return do_split ? new : NULL;
}
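/*
 * blk_queue_split - split a bio against the queue limits, resubmitting
 * the remainder
 * @q:   request_queue the bio is submitted to
 * @bio: bio to split; on return it points at the part to process now
 * @bs:  bio_set to allocate the split bio from
 *
 * If *@bio exceeds the queue limits, split off the largest prefix that
 * fits, resubmit the remainder with generic_make_request() and update
 * *@bio to point at the prefix.  The physical segment count computed
 * while splitting is cached in ->bi_phys_segments.
 *
 * A bio-based driver's make_request_fn would typically start with:
 *
 *	blk_queue_split(q, &bio, q->bio_split);
 */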
void blk_queue_split(struct request_queue *q, struct bio **bio,
		     struct bio_set *bs)
{
	struct bio *split, *res;
	unsigned nsegs;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, bs, &nsegs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, bs, &nsegs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
		break;
	}

	/* physical segments can be figured out during splitting */
	res = split ? split : *bio;
	res->bi_phys_segments = nsegs;
	bio_set_flag(res, BIO_SEG_VALID);

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);

static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
	    (seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
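/*
 * Check whether @bio and @nxt could share one physical segment: the
 * queue must support clustering, the combined size must stay within
 * queue_max_segment_size(), and the last bvec of @bio must be
 * physically contiguous with the first bvec of @nxt without crossing a
 * segment boundary.  Returns 1 if they can be merged, 0 otherwise.
 */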
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_get_last_bvec(bio, &end_bv);
	bio_get_first_bvec(nxt, &nxt_bv);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}

static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = sglist;
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int cluster = blk_queue_cluster(q), nsegs = 0;

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}
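/*
 * A driver preparing a request for DMA would typically call
 * blk_rq_map_sg() along the lines of the sketch below ('dev', 'sgl'
 * and 'dir' are the driver's own, not something defined here):
 *
 *	sg_init_table(sgl, queue_max_segments(q));
 *	nents = blk_rq_map_sg(q, rq, sgl);
 *	nents = dma_map_sg(dev, sgl, nents, dir);
 */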
/*
 * Map a request to a scatterlist, returning the number of sg entries
 * set up. The caller must make sure that sg can hold
 * rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (op_is_write(req_op(rq)))
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the computed number of
	 * segments is bigger than the request's number of physical
	 * segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment. Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(q, req);
	return 0;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload;
 * it does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}
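/*
 * Decide whether @req and @next may be merged into one request without
 * violating the queue limits.  On success the segment counts and the
 * cached front/back segment sizes are updated and 1 is returned; a
 * return of 0 means the two requests must stay separate.
 */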
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued request.
	 * Can't merge them if they are.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios. It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}
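/*
 * A request is going away because it was merged into another one; drop
 * it from the per-partition in-flight count and update the partition's
 * round statistics accordingly.
 */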
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue-wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * If we are allowed to merge, then append the bio list from
	 * next to rq and release next. ll_merge_requests_fn() will have
	 * updated the segment counts; update the sector counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return NULL;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding. This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/*
	 * Ownership of the bios has passed from next to req; return
	 * 'next' for the caller to free.
	 */
	next->bio = NULL;
	return next;
}

struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct elevator_queue *e = q->elevator;
	struct request *free;

	if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
		if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
			return 0;

	free = attempt_merge(q, rq, next);
	if (free) {
		__blk_put_request(q, free);
		return 1;
	}

	return 0;
}
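/*
 * Cheap checks for whether @bio may be merged into @rq at all: same
 * operation, same data direction, same disk, no special payload, a
 * compatible integrity profile and, for WRITE_SAME, the same source
 * buffer.  Position and size checks are left to blk_try_merge() and
 * the ll_*_merge_fn helpers.
 */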
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (req_op(rq) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(rq->q) > 1)
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}