--- blk-merge.c (371bb62158d53c1fc33e2fb9b6aeb9522caf6cf4)
+++ blk-merge.c (d665e12aa713e598a1100a320e5679c3f73823ed)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Functions related to segment and merge handling
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
--- 91 unchanged lines hidden ---
         split_sectors -= tmp;
 
         return bio_split(bio, split_sectors, GFP_NOIO, bs);
 }
 
 static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
                 struct bio *bio, struct bio_set *bs, unsigned *nsegs)
 {
-        *nsegs = 1;
+        *nsegs = 0;
 
         if (!q->limits.max_write_zeroes_sectors)
                 return NULL;
 
         if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
                 return NULL;
 
         return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
--- 80 unchanged lines hidden ---
 static struct bio *blk_bio_segment_split(struct request_queue *q,
                                          struct bio *bio,
                                          struct bio_set *bs,
                                          unsigned *segs)
 {
         struct bio_vec bv, bvprv, *bvprvp = NULL;
         struct bvec_iter iter;
         unsigned nsegs = 0, sectors = 0;
-        bool do_split = true;
-        struct bio *new = NULL;
         const unsigned max_sectors = get_max_io_size(q, bio);
         const unsigned max_segs = queue_max_segments(q);
 
         bio_for_each_bvec(bv, bio, iter) {
                 /*
                  * If the queue doesn't support SG gaps and adding this
                  * offset would create a gap, disallow it.
                  */
--- 25 unchanged lines hidden ---
                         nsegs++;
                         sectors += bv.bv_len >> 9;
                 } else if (bvec_split_segs(q, &bv, &nsegs, &sectors,
                                 max_segs)) {
                         goto split;
                 }
         }
 
-        do_split = false;
+        *segs = nsegs;
+        return NULL;
 split:
         *segs = nsegs;
-
-        if (do_split) {
-                new = bio_split(bio, sectors, GFP_NOIO, bs);
-                if (new)
-                        bio = new;
-        }
-
-        return do_split ? new : NULL;
+        return bio_split(bio, sectors, GFP_NOIO, bs);
 }
 
-void blk_queue_split(struct request_queue *q, struct bio **bio)
+void __blk_queue_split(struct request_queue *q, struct bio **bio,
+                unsigned int *nr_segs)
 {
-        struct bio *split, *res;
-        unsigned nsegs;
+        struct bio *split;
 
         switch (bio_op(*bio)) {
         case REQ_OP_DISCARD:
         case REQ_OP_SECURE_ERASE:
-                split = blk_bio_discard_split(q, *bio, &q->bio_split, &nsegs);
+                split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
                 break;
         case REQ_OP_WRITE_ZEROES:
-                split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, &nsegs);
+                split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
+                                nr_segs);
                 break;
         case REQ_OP_WRITE_SAME:
-                split = blk_bio_write_same_split(q, *bio, &q->bio_split, &nsegs);
+                split = blk_bio_write_same_split(q, *bio, &q->bio_split,
+                                nr_segs);
                 break;
         default:
-                split = blk_bio_segment_split(q, *bio, &q->bio_split, &nsegs);
+                split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
                 break;
         }
 
-        /* physical segments can be figured out during splitting */
-        res = split ? split : *bio;
-        res->bi_phys_segments = nsegs;
-        bio_set_flag(res, BIO_SEG_VALID);
-
         if (split) {
                 /* there isn't chance to merge the splitted bio */
                 split->bi_opf |= REQ_NOMERGE;
 
                 /*
                  * Since we're recursing into make_request here, ensure
                  * that we mark this bio as already having entered the queue.
                  * If not, and the queue is going away, we can get stuck
--- 4 unchanged lines hidden ---
                 bio_set_flag(*bio, BIO_QUEUE_ENTERED);
 
                 bio_chain(split, *bio);
                 trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
                 generic_make_request(*bio);
                 *bio = split;
         }
 }
+
+void blk_queue_split(struct request_queue *q, struct bio **bio)
+{
+        unsigned int nr_segs;
+
+        __blk_queue_split(q, bio, &nr_segs);
+}
 EXPORT_SYMBOL(blk_queue_split);
 
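
The split entry point is now two functions: __blk_queue_split() reports the number of physical segments it computed through *nr_segs, while the exported blk_queue_split() keeps the old one-argument behaviour for callers that do not need the count. A minimal sketch of the intended call pattern, assuming hypothetical helpers (my_submit_bio() and my_setup_request() are illustrative names, not kernel functions):

```c
/* Hypothetical submission path; names are illustrative only. */
static void my_setup_request(struct request_queue *q, struct bio *bio,
                             unsigned int nr_segs);

static void my_submit_bio(struct request_queue *q, struct bio *bio)
{
        unsigned int nr_segs;

        /* May replace bio with its front split and requeue the remainder. */
        __blk_queue_split(q, &bio, &nr_segs);

        /* The segment count now travels with the call, not in the bio. */
        my_setup_request(q, bio, nr_segs);
}
```
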
-static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
-                                             struct bio *bio)
+unsigned int blk_recalc_rq_segments(struct request *rq)
 {
         unsigned int nr_phys_segs = 0;
-        struct bvec_iter iter;
+        struct req_iterator iter;
         struct bio_vec bv;
 
-        if (!bio)
+        if (!rq->bio)
                 return 0;
 
-        switch (bio_op(bio)) {
+        switch (bio_op(rq->bio)) {
         case REQ_OP_DISCARD:
         case REQ_OP_SECURE_ERASE:
         case REQ_OP_WRITE_ZEROES:
                 return 0;
         case REQ_OP_WRITE_SAME:
                 return 1;
         }
 
-        for_each_bio(bio) {
-                bio_for_each_bvec(bv, bio, iter)
-                        bvec_split_segs(q, &bv, &nr_phys_segs, NULL, UINT_MAX);
-        }
-
+        rq_for_each_bvec(bv, rq, iter)
+                bvec_split_segs(rq->q, &bv, &nr_phys_segs, NULL, UINT_MAX);
         return nr_phys_segs;
 }
 
-void blk_recalc_rq_segments(struct request *rq)
-{
-        rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
-}
-
-void blk_recount_segments(struct request_queue *q, struct bio *bio)
-{
-        struct bio *nxt = bio->bi_next;
-
-        bio->bi_next = NULL;
-        bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
-        bio->bi_next = nxt;
-
-        bio_set_flag(bio, BIO_SEG_VALID);
-}
-
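
blk_recalc_rq_segments() now walks the whole request with rq_for_each_bvec() and returns the count instead of caching it in bio->bi_phys_segments, which is what lets the old __blk_recalc_rq_segments()/blk_recount_segments() pair go away. A plausible caller-side idiom under that assumption (my_recount() is an illustrative wrapper, not kernel code):

```c
/* Illustrative only: store the recomputed count on the request. */
static void my_recount(struct request *rq)
{
        rq->nr_phys_segments = blk_recalc_rq_segments(rq);
}
```
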
 static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
                 struct scatterlist *sglist)
 {
         if (!*sg)
                 return sglist;
 
         /*
          * If the driver previously mapped a shorter list, we could see a
--- 154 unchanged lines hidden ---
          * segment is bigger than number of req's physical segments
          */
         WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
 
         return nsegs;
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
-static inline int ll_new_hw_segment(struct request_queue *q,
-                                    struct request *req,
-                                    struct bio *bio)
+static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
+                unsigned int nr_phys_segs)
 {
-        int nr_phys_segs = bio_phys_segments(q, bio);
-
-        if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
+        if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q))
                 goto no_merge;
 
-        if (blk_integrity_merge_bio(q, req, bio) == false)
+        if (blk_integrity_merge_bio(req->q, req, bio) == false)
                 goto no_merge;
 
         /*
          * This will form the start of a new hw segment. Bump both
          * counters.
          */
         req->nr_phys_segments += nr_phys_segs;
         return 1;
 
 no_merge:
-        req_set_nomerge(q, req);
+        req_set_nomerge(req->q, req);
         return 0;
 }
 
-int ll_back_merge_fn(struct request_queue *q, struct request *req,
-                     struct bio *bio)
+int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
 {
         if (req_gap_back_merge(req, bio))
                 return 0;
         if (blk_integrity_rq(req) &&
             integrity_req_gap_back_merge(req, bio))
                 return 0;
         if (blk_rq_sectors(req) + bio_sectors(bio) >
             blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
-                req_set_nomerge(q, req);
+                req_set_nomerge(req->q, req);
                 return 0;
         }
-        if (!bio_flagged(req->biotail, BIO_SEG_VALID))
-                blk_recount_segments(q, req->biotail);
-        if (!bio_flagged(bio, BIO_SEG_VALID))
-                blk_recount_segments(q, bio);
 
-        return ll_new_hw_segment(q, req, bio);
+        return ll_new_hw_segment(req, bio, nr_segs);
 }
 
-int ll_front_merge_fn(struct request_queue *q, struct request *req,
-                      struct bio *bio)
+int ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
 {
-
         if (req_gap_front_merge(req, bio))
                 return 0;
         if (blk_integrity_rq(req) &&
             integrity_req_gap_front_merge(req, bio))
                 return 0;
         if (blk_rq_sectors(req) + bio_sectors(bio) >
             blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
-                req_set_nomerge(q, req);
+                req_set_nomerge(req->q, req);
                 return 0;
         }
-        if (!bio_flagged(bio, BIO_SEG_VALID))
-                blk_recount_segments(q, bio);
-        if (!bio_flagged(req->bio, BIO_SEG_VALID))
-                blk_recount_segments(q, req->bio);
 
-        return ll_new_hw_segment(q, req, bio);
+        return ll_new_hw_segment(req, bio, nr_segs);
 }
 
 static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
                 struct request *next)
 {
         unsigned short segments = blk_rq_nr_discard_segments(req);
 
         if (segments >= queue_max_discard_segments(q))
--- 286 unchanged lines hidden ---
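
With the segment count supplied by the caller, the merge helpers no longer consult BIO_SEG_VALID or recount segments themselves. A rough sketch of how the pieces might fit together after this change, using a hypothetical wrapper rather than the real block-layer merge path (accounting and tracing omitted; assumes the private block/blk.h prototypes are in scope):

```c
/*
 * Hypothetical helper, not kernel code: thread nr_segs from
 * __blk_queue_split() through to ll_back_merge_fn().
 */
static bool my_try_back_merge(struct request *req, struct bio *bio,
                              unsigned int nr_segs)
{
        if (!ll_back_merge_fn(req, bio, nr_segs))
                return false;

        /* On success the helper already bumped req->nr_phys_segments. */
        req->biotail->bi_next = bio;
        req->biotail = bio;
        req->__data_len += bio->bi_iter.bi_size;
        return true;
}
```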