--- block/blk-merge.c	(3bfaf1f7044c6a3b1e00fcad2d0529f0da449d67)
+++ block/blk-merge.c	(8a96a0e408102fb7aa73d8aa0b5e2219cfd51e55)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Functions related to segment and merge handling
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>

--- 253 unchanged lines hidden ---

                        bvec_split_segs(q, &bv, &nsegs,
                                        &seg_size,
                                        &front_seg_size,
                                        &sectors, max_segs);
                }
                goto split;
        }

-               if (bvprvp) {
-                       if (seg_size + bv.bv_len > queue_max_segment_size(q))
-                               goto new_segment;
-                       if (!biovec_phys_mergeable(q, bvprvp, &bv))
-                               goto new_segment;
-
-                       seg_size += bv.bv_len;
-                       bvprv = bv;
-                       bvprvp = &bvprv;
-                       sectors += bv.bv_len >> 9;
-
-                       if (nsegs == 1 && seg_size > front_seg_size)
-                               front_seg_size = seg_size;
-
-                       continue;
-               }
-new_segment:
                if (nsegs == max_segs)
                        goto split;

                bvprv = bv;
                bvprvp = &bvprv;

                if (bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
                        nsegs++;

--- 70 unchanged lines hidden ---

                *bio = split;
        }
 }
 EXPORT_SYMBOL(blk_queue_split);
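
The deleted fast path above merged adjacent bvecs inside one bio; with multi-page bvecs that merging already happens when pages are added to the bio, so every bvec now goes straight through bvec_split_segs(). As an illustration only, here is a standalone sketch of the splitting arithmetic; MAX_SEG_SIZE and the bare counters are stand-ins for the real queue limits, not kernel API:

#include <stdio.h>

#define MAX_SEG_SIZE    (64 * 1024)     /* stand-in for queue_max_segment_size() */

/* Split one large bvec into segments no bigger than MAX_SEG_SIZE. */
static unsigned split_one_bvec(unsigned bv_len, unsigned *nsegs,
                               unsigned max_segs)
{
        unsigned total = 0;

        while (bv_len > 0 && *nsegs < max_segs) {
                unsigned len = bv_len < MAX_SEG_SIZE ? bv_len : MAX_SEG_SIZE;

                (*nsegs)++;
                total += len;
                bv_len -= len;
        }
        return total;   /* bytes consumed before hitting max_segs */
}

int main(void)
{
        unsigned nsegs = 0;

        /* a 200 KiB multi-page bvec becomes four segments at a 64 KiB cap */
        printf("consumed %u bytes in %u segments\n",
               split_one_bvec(200 * 1024, &nsegs, 128), nsegs);
        return 0;
}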

 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                              struct bio *bio)
 {
-       struct bio_vec bv, bvprv = { NULL };
-       int prev = 0;
+       struct bio_vec uninitialized_var(bv), bvprv = { NULL };
        unsigned int seg_size, nr_phys_segs;
        unsigned front_seg_size;
        struct bio *fbio, *bbio;
        struct bvec_iter iter;
+       bool new_bio = false;

        if (!bio)
                return 0;

        front_seg_size = bio->bi_seg_front_size;

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:

--- 4 unchanged lines hidden ---

                return 1;
        }

        fbio = bio;
        seg_size = 0;
        nr_phys_segs = 0;
        for_each_bio(bio) {
                bio_for_each_bvec(bv, bio, iter) {
-                       if (prev) {
+                       if (new_bio) {
                                if (seg_size + bv.bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
                                if (!biovec_phys_mergeable(q, &bvprv, &bv))
                                        goto new_segment;

                                seg_size += bv.bv_len;
-                               bvprv = bv;

                                if (nr_phys_segs == 1 && seg_size >
                                    front_seg_size)
                                        front_seg_size = seg_size;

                                continue;
                        }
 new_segment:
-                       bvprv = bv;
-                       prev = 1;
                        bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
                                        &front_seg_size, NULL, UINT_MAX);
+                       new_bio = false;
                }
                bbio = bio;
+               if (likely(bio->bi_iter.bi_size)) {
+                       bvprv = bv;
+                       new_bio = true;
+               }
        }

        fbio->bi_seg_front_size = front_seg_size;
        if (seg_size > bbio->bi_seg_back_size)
                bbio->bi_seg_back_size = seg_size;

        return nr_phys_segs;
 }
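
The recalculation now merge-checks a bvec against bvprv only when it is the first bvec of a bio (the new_bio flag), and bvprv is recorded after the inner loop, guarded by bi_size so that an empty bio never publishes an uninitialized bv. A minimal sketch of that cross-bio check; MAX_SEG_SIZE and the physical addresses are invented for the example:

#include <stdbool.h>
#include <stdio.h>

#define MAX_SEG_SIZE    (64 * 1024)     /* stand-in for queue_max_segment_size() */

struct vec { unsigned long phys; unsigned len; };

int main(void)
{
        /* last bvec of bio 0 and first bvec of bio 1, physically adjacent */
        const struct vec tail = { 0x10000, 0x1000 };
        const struct vec head = { 0x11000, 0x1000 };
        unsigned nsegs = 2;             /* counted independently at first */
        unsigned seg_size = tail.len;
        bool new_bio = true;            /* head opens a new bio */

        if (new_bio &&
            seg_size + head.len <= MAX_SEG_SIZE &&
            tail.phys + tail.len == head.phys) {
                seg_size += head.len;   /* the two bvecs form one segment */
                nsegs--;
        }

        printf("%u physical segment(s), %u bytes\n", nsegs, seg_size);
        return 0;
}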

--- 49 unchanged lines hidden ---

        return sg_next(*sg);
 }

 static unsigned blk_bvec_map_sg(struct request_queue *q,
                struct bio_vec *bvec, struct scatterlist *sglist,
                struct scatterlist **sg)
 {
        unsigned nbytes = bvec->bv_len;
-       unsigned nsegs = 0, total = 0, offset = 0;
+       unsigned nsegs = 0, total = 0;

        while (nbytes > 0) {
-               unsigned seg_size;
-               struct page *pg;
-               unsigned idx;
+               unsigned offset = bvec->bv_offset + total;
+               unsigned len = min(get_max_segment_size(q, offset), nbytes);

                *sg = blk_next_sg(sg, sglist);
+               sg_set_page(*sg, bvec->bv_page, len, offset);

-               seg_size = get_max_segment_size(q, bvec->bv_offset + total);
-               seg_size = min(nbytes, seg_size);
-
-               offset = (total + bvec->bv_offset) % PAGE_SIZE;
-               idx = (total + bvec->bv_offset) / PAGE_SIZE;
-               pg = bvec_nth_page(bvec->bv_page, idx);
-
-               sg_set_page(*sg, pg, seg_size, offset);
-
-               total += seg_size;
-               nbytes -= seg_size;
+               total += len;
+               nbytes -= len;
                nsegs++;
        }

        return nsegs;
 }
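
The rewritten loop no longer computes a page index via bvec_nth_page(); it emits (offset, len) pairs relative to bvec->bv_page, and the offset may grow past PAGE_SIZE because the pages of a multi-page bvec are physically contiguous and scatterlist entries tolerate such offsets. A sketch of that arithmetic, assuming a flat 64 KiB cap in place of get_max_segment_size():

#include <stdio.h>

#define MAX_SEG_SIZE    (64 * 1024)     /* assumed flat segment-size limit */

static void map_bvec(unsigned bv_offset, unsigned bv_len)
{
        unsigned total = 0, nbytes = bv_len;

        while (nbytes > 0) {
                unsigned offset = bv_offset + total;
                unsigned len = nbytes < MAX_SEG_SIZE ? nbytes : MAX_SEG_SIZE;

                /* kernel: sg_set_page(*sg, bvec->bv_page, len, offset) */
                printf("sg entry: offset=%u len=%u\n", offset, len);
                total += len;
                nbytes -= len;
        }
}

int main(void)
{
        map_bvec(512, 150 * 1024);      /* a 150 KiB bvec starting mid-page */
        return 0;
}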

-static inline void
-__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
-                    struct scatterlist *sglist, struct bio_vec *bvprv,
-                    struct scatterlist **sg, int *nsegs)
-{
-
-       int nbytes = bvec->bv_len;
-
-       if (*sg) {
-               if ((*sg)->length + nbytes > queue_max_segment_size(q))
-                       goto new_segment;
-               if (!biovec_phys_mergeable(q, bvprv, bvec))
-                       goto new_segment;
-
-               (*sg)->length += nbytes;
-       } else {
-new_segment:
-               if (bvec->bv_offset + bvec->bv_len <= PAGE_SIZE) {
-                       *sg = blk_next_sg(sg, sglist);
-                       sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
-                       (*nsegs) += 1;
-               } else
-                       (*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg);
-       }
-       *bvprv = *bvec;
-}
-
-static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
-               struct scatterlist *sglist, struct scatterlist **sg)
-{
-       *sg = sglist;
-       sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
-       return 1;
-}
+static inline int __blk_bvec_map_sg(struct bio_vec bv,
+               struct scatterlist *sglist, struct scatterlist **sg)
+{
+       *sg = blk_next_sg(sg, sglist);
+       sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
+       return 1;
+}
+
+/* only try to merge bvecs into one sg if they are from two bios */
+static inline bool
+__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
+                          struct bio_vec *bvprv, struct scatterlist **sg)
+{
+
+       int nbytes = bvec->bv_len;
+
+       if (!*sg)
+               return false;
+
+       if ((*sg)->length + nbytes > queue_max_segment_size(q))
+               return false;
+
+       if (!biovec_phys_mergeable(q, bvprv, bvec))
+               return false;
+
+       (*sg)->length += nbytes;
+
+       return true;
+}
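
The merge half of the old helper survives as a boolean predicate built from early-return guards rather than gotos. The same decision as a self-contained model; MAX_SEG_SIZE and the (phys, len) pair standing in for a scatterlist entry are assumptions of the sketch:

#include <assert.h>
#include <stdbool.h>

#define MAX_SEG_SIZE    (64 * 1024)     /* stand-in for queue_max_segment_size() */

struct seg { unsigned long phys; unsigned len; };       /* one sg entry */

static bool try_merge(struct seg *prev, unsigned long phys, unsigned len)
{
        if (!prev->len)
                return false;   /* nothing mapped yet */
        if (prev->len + len > MAX_SEG_SIZE)
                return false;   /* would overflow the segment limit */
        if (prev->phys + prev->len != phys)
                return false;   /* not physically contiguous */

        prev->len += len;       /* extend the existing entry */
        return true;
}

int main(void)
{
        struct seg s = { 0x1000, 0x800 };
        bool merged;

        merged = try_merge(&s, 0x1800, 0x200);  /* adjacent: merged */
        assert(merged && s.len == 0xa00);

        merged = try_merge(&s, 0x9000, 0x200);  /* gap: caller adds an entry */
        assert(!merged);
        return 0;
}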

 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
                             struct scatterlist *sglist,
                             struct scatterlist **sg)
 {
-       struct bio_vec bvec, bvprv = { NULL };
+       struct bio_vec uninitialized_var(bvec), bvprv = { NULL };
        struct bvec_iter iter;
        int nsegs = 0;
+       bool new_bio = false;

-       for_each_bio(bio)
-               bio_for_each_bvec(bvec, bio, iter)
-                       __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
-                                            &nsegs);
+       for_each_bio(bio) {
+               bio_for_each_bvec(bvec, bio, iter) {
+                       /*
+                        * Only try to merge bvecs from two bios given we
+                        * have done bio internal merge when adding pages
+                        * to bio
+                        */
+                       if (new_bio &&
+                           __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
+                               goto next_bvec;
+
+                       if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
+                               nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
+                       else
+                               nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
+ next_bvec:
+                       new_bio = false;
+               }
+               if (likely(bio->bi_iter.bi_size)) {
+                       bvprv = bvec;
+                       new_bio = true;
+               }
+       }

        return nsegs;
 }
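
Tying the pieces together: a successful cross-bio merge extends the previous scatterlist entry, anything else appends entries, and the bi_size guard keeps an empty bio (a flush, say) from disturbing bvprv. A compact model of that loop; the one-bvec bios and fixed-size arrays are assumptions of the sketch:

#include <stdbool.h>
#include <stdio.h>

struct vec { unsigned long phys; unsigned len; };       /* one bvec */
struct bio_m { struct vec v; bool empty; };             /* a one-bvec bio */

int main(void)
{
        const struct bio_m bios[] = {
                { { 0x1000, 0x1000 }, false },
                { { 0,      0      }, true  },  /* e.g. an empty flush bio */
                { { 0x2000, 0x1000 }, false },  /* contiguous with bio 0 */
        };
        struct vec sgl[4];                      /* stands in for the sg list */
        struct vec bvprv = { 0, 0 };
        bool new_bio = false;
        int nsegs = 0;

        for (unsigned i = 0; i < 3; i++) {
                const struct vec *bv = &bios[i].v;

                if (bios[i].empty)
                        continue;               /* no bvecs: keep bvprv/new_bio */

                if (new_bio && bvprv.phys + bvprv.len == bv->phys)
                        sgl[nsegs - 1].len += bv->len;  /* merged entry */
                else
                        sgl[nsegs++] = *bv;             /* new entry */

                bvprv = *bv;
                new_bio = true;
        }

        printf("%d sg entries, first is %u bytes\n", nsegs, sgl[0].len);
        return 0;
}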

 /*
  * map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
  */
 int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                   struct scatterlist *sglist)
 {
        struct scatterlist *sg = NULL;
        int nsegs = 0;

        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
-               nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
+               nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
        else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
-               nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
+               nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
        else if (rq->bio)
                nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

        if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
            (blk_rq_bytes(rq) & q->dma_pad_mask)) {
                unsigned int pad_len =
                        (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

--- 400 unchanged lines hidden ---
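
For context, the usual consumer pattern for blk_rq_map_sg(): size a scatterlist from blk_rq_nr_phys_segments(), build it, then DMA-map it. A sketch only; my_dev, its dma_dev field, and the error handling are placeholders, not anything this file defines:

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

struct my_dev {                         /* placeholder driver state */
        struct device *dma_dev;
};

static int my_dev_map_request(struct my_dev *dev, struct request *rq,
                              struct scatterlist *sgl)
{
        int nsegs;

        /* sgl must hold at least blk_rq_nr_phys_segments(rq) entries */
        sg_init_table(sgl, blk_rq_nr_phys_segments(rq));

        nsegs = blk_rq_map_sg(rq->q, rq, sgl);
        if (!nsegs)
                return -EIO;

        /* returns the number of DMA-mapped entries, 0 on failure */
        return dma_map_sg(dev->dma_dev, sgl, nsegs,
                          rq_data_dir(rq) == WRITE ?
                                DMA_TO_DEVICE : DMA_FROM_DEVICE);
}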