xref: /openbmc/linux/block/blk-merge.c (revision 3b64b188)
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

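/*
 * Walk all bios in the chain starting at @bio and count the number of
 * physical segments needed, honouring the queue's clustering setting,
 * maximum segment size and segment boundary mask.  The size of the
 * first and last segment is cached in bi_seg_front_size and
 * bi_seg_back_size so that later merge decisions can tell whether
 * joining two adjacent bios would produce an oversized segment.
 */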
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	struct bio_vec *bv, *bvprv = NULL;
	int cluster, i, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;

	if (!bio)
		return 0;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, i) {
			/*
			 * The trick here is making sure that a high page is
			 * never considered part of another segment, since
			 * its physical address may change once it is bounced
			 * below the queue's bounce limit.
			 */
			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
			if (high || highprv)
				goto new_segment;
			if (cluster) {
				if (seg_size + bv->bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
					goto new_segment;

				seg_size += bv->bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv->bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

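/*
 * Recompute rq->nr_phys_segments after the bio list of a request has
 * changed.
 */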
void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

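/*
 * Recompute bio->bi_phys_segments for a single bio and mark the result
 * as valid.  The bio is temporarily unlinked from its chain so that only
 * its own segments are counted.
 */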
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

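/*
 * Return 1 if the last segment of @bio and the first segment of @nxt can
 * be folded into a single physical segment without exceeding the queue's
 * segment size or boundary limits, 0 otherwise.
 */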
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}

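/*
 * Add a single bio_vec to the scatterlist being built: either extend the
 * current sg entry when clustering allows the two to be coalesced, or
 * start a new entry.  Shared by blk_rq_map_sg() and blk_bio_map_sg().
 */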
static void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec **bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*bvprv && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			(*sg)->page_link &= ~0x02;
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = bvec;
}

/*
 * Map a request to a scatterlist and return the number of sg entries
 * set up.  The caller must make sure sg can hold rq->nr_phys_segments
 * entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct req_iterator iter;
	struct scatterlist *sg;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	sg = NULL;
	rq_for_each_segment(bvec, rq, iter) {
		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
				     &nsegs, &cluster);
	} /* segments in rq */

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

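/*
 * Typical driver-side usage of blk_rq_map_sg() (an illustrative sketch,
 * not code from this file): the driver provides a scatterlist sized for
 * the queue's segment limit, maps the request onto it and then hands the
 * resulting entries to the DMA API.
 *
 *	struct scatterlist sgl[MY_MAX_SEGMENTS];  // >= queue_max_segments(q)
 *	int nents;
 *
 *	sg_init_table(sgl, MY_MAX_SEGMENTS);
 *	nents = blk_rq_map_sg(q, rq, sgl);
 *	nents = dma_map_sg(dev, sgl, nents,
 *			   rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE
 *						     : DMA_FROM_DEVICE);
 *
 * MY_MAX_SEGMENTS and dev are hypothetical names local to this example.
 */
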
/**
 * blk_bio_map_sg - map a bio to a scatterlist
 * @q: request_queue in question
 * @bio: bio being mapped
 * @sglist: scatterlist being mapped
 *
 * Note:
 *    Caller must make sure sg can hold bio->bi_phys_segments entries
 *
 * Will return the number of sg entries set up
 */
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
		   struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct scatterlist *sg;
	int nsegs, cluster;
	unsigned long i;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	bvprv = NULL;
	sg = NULL;
	bio_for_each_segment(bvec, bio, i) {
		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
				     &nsegs, &cluster);
	} /* segments in bio */

	if (sg)
		sg_mark_end(sg);

	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
	return nsegs;
}
EXPORT_SYMBOL(blk_bio_map_sg);

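/*
 * Common helper for the ll_*_merge_fn functions below: account the
 * physical segments of @bio to @req, failing the merge if the queue's
 * segment limit would be exceeded or the integrity payloads cannot be
 * merged.
 */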
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump the
	 * physical segment counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

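/*
 * Called when @bio is a candidate for being appended to the back of
 * @req.  Returns 1 if the merged request would still respect the queue's
 * size and segment limits, 0 otherwise.
 */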
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	unsigned short max_sectors;

	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
		max_sectors = queue_max_hw_sectors(q);
	else
		max_sectors = queue_max_sectors(q);

	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

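/*
 * Same as ll_back_merge_fn(), but for the case where @bio would be
 * prepended to the front of @req.
 */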
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	unsigned short max_sectors;

	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
		max_sectors = queue_max_hw_sectors(q);
	else
		max_sectors = queue_max_sectors(q);

	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

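/*
 * Check whether two whole requests, @req and @next, can be combined:
 * neither may be a re-queued request, the combined size must stay within
 * queue_max_sectors() and the total physical segment count (possibly
 * reduced by one if the boundary segments can be coalesced) must not
 * exceed queue_max_segments().
 */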
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued request.
	 * Can't merge them if it is.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

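/*
 * Account for the fact that @req is going away because it was merged
 * into another request: round off the partition statistics and decrement
 * the in-flight counter.
 */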
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Attempt to merge @next into @req.  Has to be called with the request
 * spinlock acquired.
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			  struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	/*
	 * Don't merge file system requests and discard requests
	 */
	if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
		return 0;

	/*
	 * Don't merge discard requests and secure discard requests
	 */
	if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->special)
		return 0;

	/*
	 * If we are allowed to merge, then append the bio list from next
	 * to req and release next.  ll_merge_requests_fn() will have
	 * updated the segment counts; update the sector counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

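/*
 * Ask the elevator for the request that sorts immediately after @rq and
 * try to merge the two.
 */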
int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

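/*
 * Ask the elevator for the request that sorts immediately before @rq and
 * try to merge @rq into it.
 */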
int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

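/*
 * Cheap checks for whether @bio could be merged into @rq at all:
 * matching discard/secure attributes, same data direction, same disk,
 * no driver-private data and matching integrity state.  The positional
 * check (front vs. back) is done separately in blk_try_merge().
 */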
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return false;

	/* don't merge file system requests and discard requests */
	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
		return false;

	/* don't merge discard requests and secure discard requests */
	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return false;

	return true;
}

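/*
 * Classify how @bio lines up with @rq: a back merge if @bio starts right
 * where @rq ends, a front merge if @bio ends right where @rq starts, or
 * no merge otherwise.
 */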
int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
558