Lines matching refs: q (the struct request_queue pointer) in the block layer's bio/request merge code

52 static inline bool bio_will_gap(struct request_queue *q,  in bio_will_gap()  argument
57 if (!bio_has_data(prev) || !queue_virt_boundary(q)) in bio_will_gap()
69 if (pb.bv_offset & queue_virt_boundary(q)) in bio_will_gap()
83 if (biovec_phys_mergeable(q, &pb, &nb)) in bio_will_gap()
85 return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset); in bio_will_gap()
90 return bio_will_gap(req->q, req, req->biotail, bio); in req_gap_back_merge()
95 return bio_will_gap(req->q, NULL, bio, req->bio); in req_gap_front_merge()
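The cluster above is the virt-boundary gap test: two bios can share a request only if the first ends on the queue's virt boundary and the second starts on it. req_gap_back_merge() runs the check with req->biotail ahead of the new bio; req_gap_front_merge() with the new bio ahead of req->bio. A minimal user-space model of the arithmetic (the standalone names and framing are mine, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of __bvec_gap_to_prev(): a gap exists if the previous vector
     * does not end on the virt boundary or the next does not start on it.
     * mask is e.g. 0xfff for a 4K virt boundary (NVMe PRP-style). */
    static bool gap_to_prev(unsigned long mask, unsigned int prev_offset,
                            unsigned int prev_len, unsigned int next_offset)
    {
        return (next_offset & mask) || ((prev_offset + prev_len) & mask);
    }

    int main(void)
    {
        /* prev runs up to the 4K boundary, next starts page-aligned: mergeable */
        printf("%d\n", gap_to_prev(0xfff, 0x800, 0x800, 0x000)); /* 0: no gap */
        /* next starts mid-page: the bios must stay in separate requests */
        printf("%d\n", gap_to_prev(0xfff, 0x800, 0x800, 0x200)); /* 1: gap */
        return 0;
    }
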
425 if (queue_max_discard_segments(rq->q) > 1) { in blk_recalc_rq_segments()
440 bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes, in blk_recalc_rq_segments()
462 static unsigned blk_bvec_map_sg(struct request_queue *q, in blk_bvec_map_sg() argument
471 unsigned len = min(get_max_segment_size(&q->limits, in blk_bvec_map_sg()
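blk_bvec_map_sg() chops one oversized bvec into several scatterlist entries, each capped by get_max_segment_size(), which honours both max_segment_size and the hardware segment-boundary mask from the queue limits. A hedged model of that cap (helper name and standalone framing are mine):

    #include <stdio.h>

    /* Model of get_max_segment_size(): how many bytes may occupy one
     * scatterlist entry starting at physical address addr, given the
     * queue's segment boundary mask and maximum segment size. */
    static unsigned long long max_seg_bytes(unsigned long long addr,
                                            unsigned long long boundary_mask,
                                            unsigned long long max_segment_size)
    {
        unsigned long long to_boundary =
            boundary_mask - (addr & boundary_mask) + 1;
        return to_boundary < max_segment_size ? to_boundary : max_segment_size;
    }

    int main(void)
    {
        /* 64K boundary (mask 0xffff), 64K max segment: an entry that
         * starts 4K before the boundary may hold only 4K, then splits. */
        printf("%llu\n", max_seg_bytes(0xf000, 0xffff, 0x10000)); /*  4096 */
        printf("%llu\n", max_seg_bytes(0x0000, 0xffff, 0x10000)); /* 65536 */
        return 0;
    }
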
507 __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec, in __blk_segment_map_sg_merge() argument
516 if ((*sg)->length + nbytes > queue_max_segment_size(q)) in __blk_segment_map_sg_merge()
519 if (!biovec_phys_mergeable(q, bvprv, bvec)) in __blk_segment_map_sg_merge()
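__blk_segment_map_sg_merge() tries the opposite: fold the next bvec into the previous scatterlist entry, but only if the combined length stays within queue_max_segment_size() and biovec_phys_mergeable() finds the two physically contiguous inside one boundary window. As a standalone predicate (a sketch; the kernel check also has a Xen special case):

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch of the sg-extend decision: append nbytes at next_phys to an
     * entry of prev_len bytes at prev_phys, if the size cap holds, the
     * bytes abut, and start and end share one boundary window. */
    static bool can_extend_sg(unsigned long long prev_phys, unsigned int prev_len,
                              unsigned long long next_phys, unsigned int nbytes,
                              unsigned int max_segment_size,
                              unsigned long long boundary_mask)
    {
        if ((unsigned long long)prev_len + nbytes > max_segment_size)
            return false;                     /* would exceed the size cap */
        if (prev_phys + prev_len != next_phys)
            return false;                     /* not physically contiguous */
        /* start and end must fall inside the same boundary window */
        return (prev_phys | boundary_mask) ==
               ((next_phys + nbytes - 1) | boundary_mask);
    }

    int main(void)
    {
        /* two abutting 2K chunks inside one 64K window: extend */
        printf("%d\n", can_extend_sg(0x10000, 0x800, 0x10800, 0x800,
                                     0x10000, 0xffff)); /* 1 */
        return 0;
    }
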
527 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, in __blk_bios_map_sg() argument
544 __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg)) in __blk_bios_map_sg()
550 nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg); in __blk_bios_map_sg()
567 int __blk_rq_map_sg(struct request_queue *q, struct request *rq, in __blk_rq_map_sg() argument
575 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg); in __blk_rq_map_sg()
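__blk_bios_map_sg() and __blk_rq_map_sg() drive those two helpers across every bio in the request: each bvec either extends the open scatterlist entry or starts a new one, and the entry count is returned to the caller (typically a driver preparing DMA). In deliberately reduced miniature, keeping only the size cap and assuming every bvec already fits max_segment_size:

    #include <stdio.h>

    /* Miniature of the mapping loop: count scatterlist entries for a
     * run of bvec lengths under a max_segment_size cap alone. */
    static int map_sg_count(const unsigned int *bvec_len, int nvecs,
                            unsigned int max_segment_size)
    {
        int nsegs = 0;
        unsigned int cur = 0;           /* bytes in the open entry */

        for (int i = 0; i < nvecs; i++) {
            if (nsegs && cur + bvec_len[i] <= max_segment_size) {
                cur += bvec_len[i];     /* extend the previous entry */
            } else {
                nsegs++;                /* open a new entry */
                cur = bvec_len[i];
            }
        }
        return nsegs;
    }

    int main(void)
    {
        unsigned int lens[] = { 4096, 4096, 4096 };
        printf("%d\n", map_sg_count(lens, 3, 8192)); /* 2 entries */
        return 0;
    }
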
593 struct request_queue *q = rq->q; in blk_rq_get_max_sectors() local
597 return q->limits.max_hw_sectors; in blk_rq_get_max_sectors()
599 max_sectors = blk_queue_get_max_sectors(q, req_op(rq)); in blk_rq_get_max_sectors()
600 if (!q->limits.chunk_sectors || in blk_rq_get_max_sectors()
605 blk_chunk_sectors_left(offset, q->limits.chunk_sectors)); in blk_rq_get_max_sectors()
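blk_rq_get_max_sectors() caps how far a request may grow: max_hw_sectors always, and when chunk_sectors is set (zoned or stripe-like devices) also the room left in the current chunk, so a merge never straddles a chunk boundary. For power-of-two chunks blk_chunk_sectors_left() is plain mask arithmetic; a hedged model (the kernel falls back to a division for non-power-of-two sizes):

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Model of blk_chunk_sectors_left() for power-of-two chunk sizes:
     * sectors remaining from offset to the end of the current chunk. */
    static unsigned int chunk_sectors_left(sector_t offset,
                                           unsigned int chunk_sectors)
    {
        return chunk_sectors - (offset & (chunk_sectors - 1));
    }

    int main(void)
    {
        /* 128-sector chunks: a request at sector 100 may grow by at
         * most 28 sectors before crossing into the next chunk. */
        printf("%u\n", chunk_sectors_left(100, 128)); /* 28 */
        printf("%u\n", chunk_sectors_left(128, 128)); /* 128 */
        return 0;
    }
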
614 if (blk_integrity_merge_bio(req->q, req, bio) == false) in ll_new_hw_segment()
632 req_set_nomerge(req->q, req); in ll_new_hw_segment()
647 req_set_nomerge(req->q, req); in ll_back_merge_fn()
666 req_set_nomerge(req->q, req); in ll_front_merge_fn()
673 static bool req_attempt_discard_merge(struct request_queue *q, struct request *req, in req_attempt_discard_merge() argument
678 if (segments >= queue_max_discard_segments(q)) in req_attempt_discard_merge()
687 req_set_nomerge(q, req); in req_attempt_discard_merge()
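req_attempt_discard_merge() counts every bio of a multi-range discard as one segment: the merge is refused once the request has reached queue_max_discard_segments() or the combined size exceeds the sector cap, and a refusal brands the request through req_set_nomerge(). bio_attempt_discard_merge() further down applies the same gate to a bare bio. Sketched (names are mine):

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch of the discard-merge gate: req already spans `segments`
     * discard ranges and `req_sectors` sectors; adding next_sectors
     * must respect both the segment and the sector caps. */
    static bool discard_merge_allowed(unsigned short segments,
                                      unsigned short max_discard_segments,
                                      unsigned int req_sectors,
                                      unsigned int next_sectors,
                                      unsigned int max_sectors)
    {
        if (segments >= max_discard_segments)
            return false;           /* no room for another range */
        return req_sectors + next_sectors <= max_sectors;
    }

    int main(void)
    {
        printf("%d\n", discard_merge_allowed(255, 256, 1024, 8, 65535)); /* 1 */
        printf("%d\n", discard_merge_allowed(256, 256, 1024, 8, 65535)); /* 0 */
        return 0;
    }
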
691 static int ll_merge_requests_fn(struct request_queue *q, struct request *req, in ll_merge_requests_fn() argument
713 if (blk_integrity_merge_rq(q, req, next) == false) in ll_merge_requests_fn()
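The ll_* helpers (ll_new_hw_segment(), ll_back_merge_fn(), ll_front_merge_fn(), ll_merge_requests_fn()) are the limit gates for bio-into-request and request-into-request merges: each recomputes the would-be physical segment count, checks it and the integrity constraints against the queue caps, and calls req_set_nomerge() on failure so later candidates skip the request cheaply. Their shared shape, sketched with invented names:

    #include <stdbool.h>
    #include <stdio.h>

    struct req_model {
        unsigned short nr_phys_segments;
        bool           nomerge;       /* set once a gate has failed */
    };

    /* Sketch of the ll_*-style gate: admit nr_new more physical segments
     * only if the combined count stays within the queue's cap; otherwise
     * mark the request unmergeable, as req_set_nomerge() does. */
    static bool segments_gate(struct req_model *req, unsigned short nr_new,
                              unsigned short max_segments)
    {
        if (req->nr_phys_segments + nr_new > max_segments) {
            req->nomerge = true;
            return false;
        }
        req->nr_phys_segments += nr_new;
        return true;
    }

    int main(void)
    {
        struct req_model req = { .nr_phys_segments = 126 };
        int ok = segments_gate(&req, 2, 128);
        printf("%d %d\n", ok, req.nomerge); /* 1 0 */
        ok = segments_gate(&req, 1, 128);
        printf("%d %d\n", ok, req.nomerge); /* 0 1 */
        return 0;
    }
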
807 static struct request *attempt_merge(struct request_queue *q, in attempt_merge() argument
832 if (!req_attempt_discard_merge(q, req, next)) in attempt_merge()
836 if (!ll_merge_requests_fn(q, req, next)) in attempt_merge()
870 elv_merge_requests(q, req, next); in attempt_merge()
889 static struct request *attempt_back_merge(struct request_queue *q, in attempt_back_merge() argument
892 struct request *next = elv_latter_request(q, rq); in attempt_back_merge()
895 return attempt_merge(q, rq, next); in attempt_back_merge()
900 static struct request *attempt_front_merge(struct request_queue *q, in attempt_front_merge() argument
903 struct request *prev = elv_former_request(q, rq); in attempt_front_merge()
906 return attempt_merge(q, prev, rq); in attempt_front_merge()
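attempt_merge() itself is direction-agnostic; its callers fix the order. attempt_back_merge() asks the elevator for the request that follows rq on disk (elv_latter_request()) and merges (q, rq, next); attempt_front_merge() asks for the preceding one and merges (q, prev, rq); blk_attempt_req_merge() just below is the exported wrapper around the same core. The contiguity requirement at its heart, modelled:

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long long sector_t;

    struct rq_model {                /* only the fields the check needs */
        sector_t     pos;            /* first sector of the request */
        unsigned int sectors;        /* length in sectors */
    };

    /* attempt_merge() only fuses requests that abut exactly: the first
     * must end on the very sector where the second begins. */
    static bool requests_contiguous(const struct rq_model *req,
                                    const struct rq_model *next)
    {
        return req->pos + req->sectors == next->pos;
    }

    int main(void)
    {
        struct rq_model a = { .pos = 0, .sectors = 8 };
        struct rq_model b = { .pos = 8, .sectors = 8 };
        printf("%d\n", requests_contiguous(&a, &b)); /* 1: back-mergeable */
        return 0;
    }
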
916 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq, in blk_attempt_req_merge() argument
919 return attempt_merge(q, rq, next); in blk_attempt_req_merge()
939 if (blk_integrity_merge_bio(rq->q, rq, bio) == false) in blk_rq_merge_ok()
988 rq_qos_merge(req->q, req, bio); in bio_attempt_back_merge()
1014 rq_qos_merge(req->q, req, bio); in bio_attempt_front_merge()
1033 static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q, in bio_attempt_discard_merge() argument
1038 if (segments >= queue_max_discard_segments(q)) in bio_attempt_discard_merge()
1044 rq_qos_merge(q, req, bio); in bio_attempt_discard_merge()
1054 req_set_nomerge(q, req); in bio_attempt_discard_merge()
1058 static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q, in blk_attempt_bio_merge() argument
1069 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio)) in blk_attempt_bio_merge()
1073 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio)) in blk_attempt_bio_merge()
1077 return bio_attempt_discard_merge(q, rq, bio); in blk_attempt_bio_merge()
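blk_attempt_bio_merge() centralises the dispatch: the merge type decides whether the bio extends the request's tail, prepends to its head, or joins a multi-range discard, and for the first two the I/O scheduler may veto via blk_mq_sched_allow_merge(). blk_bio_list_merge() below runs the same dispatcher over a whole list. A hedged skeleton (the enums and stub hooks are mine):

    #include <stdbool.h>
    #include <stdio.h>

    enum bio_merge_status { BIO_MERGE_OK, BIO_MERGE_FAILED, BIO_MERGE_NONE };
    enum merge_type { BACK_MERGE, FRONT_MERGE, DISCARD_MERGE, NO_MERGE };

    /* Stub hooks standing in for the scheduler and the merge helpers. */
    static bool sched_allows(void)             { return true; }
    static enum bio_merge_status back(void)    { return BIO_MERGE_OK; }
    static enum bio_merge_status front(void)   { return BIO_MERGE_OK; }
    static enum bio_merge_status discard(void) { return BIO_MERGE_OK; }

    static enum bio_merge_status try_bio_merge(enum merge_type type,
                                               bool ask_scheduler)
    {
        switch (type) {
        case BACK_MERGE:        /* bio continues past the request's tail */
            if (ask_scheduler && !sched_allows())
                return BIO_MERGE_NONE;
            return back();
        case FRONT_MERGE:       /* bio ends where the request begins */
            if (ask_scheduler && !sched_allows())
                return BIO_MERGE_NONE;
            return front();
        case DISCARD_MERGE:     /* one more range for a multi-range discard */
            return discard();
        default:
            return BIO_MERGE_NONE;
        }
    }

    int main(void)
    {
        printf("%d\n", try_bio_merge(BACK_MERGE, true) == BIO_MERGE_OK); /* 1 */
        return 0;
    }
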
1105 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, in blk_attempt_plug_merge() argument
1116 if (rq->q == q) { in blk_attempt_plug_merge()
1117 if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == in blk_attempt_plug_merge()
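blk_attempt_plug_merge() avoids the shared queue entirely: it walks the task's plug list of not-yet-issued requests, considers only those whose rq->q matches the incoming bio's queue, and reuses blk_attempt_bio_merge() on the candidate. A minimal model of that scan (list and types are stand-ins; the kernel's iteration order and early-exit details differ):

    #include <stddef.h>
    #include <stdio.h>

    struct queue { int id; };
    struct req   { struct queue *q; struct req *next; };

    /* Walk the plugged (per-task, lock-free) requests and return the
     * first that belongs to the same queue as the incoming bio; the
     * caller then runs the usual bio-merge attempt on it. */
    static struct req *plug_find_candidate(struct req *plug_list,
                                           struct queue *q)
    {
        for (struct req *rq = plug_list; rq; rq = rq->next)
            if (rq->q == q)
                return rq;
        return NULL;
    }

    int main(void)
    {
        struct queue qa = { 1 }, qb = { 2 };
        struct req r2 = { &qb, NULL };
        struct req r1 = { &qa, &r2 };
        printf("%d\n", plug_find_candidate(&r1, &qb)->q->id); /* 2 */
        return 0;
    }
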
1137 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list, in blk_bio_list_merge() argument
1147 switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) { in blk_bio_list_merge()
1162 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, in blk_mq_sched_try_merge() argument
1167 switch (elv_merge(q, &rq, bio)) { in blk_mq_sched_try_merge()
1169 if (!blk_mq_sched_allow_merge(q, rq, bio)) in blk_mq_sched_try_merge()
1173 *merged_request = attempt_back_merge(q, rq); in blk_mq_sched_try_merge()
1175 elv_merged_request(q, rq, ELEVATOR_BACK_MERGE); in blk_mq_sched_try_merge()
1178 if (!blk_mq_sched_allow_merge(q, rq, bio)) in blk_mq_sched_try_merge()
1182 *merged_request = attempt_front_merge(q, rq); in blk_mq_sched_try_merge()
1184 elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE); in blk_mq_sched_try_merge()
1187 return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK; in blk_mq_sched_try_merge()
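blk_mq_sched_try_merge() is the scheduler-facing entry point: elv_merge() names a candidate request and a direction; after a successful back or front bio merge the code immediately probes whether two requests now abut (attempt_back_merge()/attempt_front_merge(), reported back through *merged_request) and informs the elevator, while discards take the gate directly. A hedged outline of that flow (stubs and the out-parameter are my simplification):

    #include <stdbool.h>
    #include <stdio.h>

    enum elv_merge { ELEVATOR_NO_MERGE, ELEVATOR_BACK_MERGE,
                     ELEVATOR_FRONT_MERGE, ELEVATOR_DISCARD_MERGE };

    /* Stand-ins for the real hooks. */
    static bool sched_allow(void)       { return true;  }
    static bool bio_back_merge(void)    { return true;  }
    static bool bio_front_merge(void)   { return true;  }
    static bool requests_now_abut(void) { return false; }
    static bool bio_discard_merge(void) { return true;  }

    /* Outline of blk_mq_sched_try_merge(): returns true if the bio was
     * merged; *two_rqs_merged reports that a request-request merge also
     * happened, in which case the caller must free the absorbed request. */
    static bool sched_try_merge(enum elv_merge type, bool *two_rqs_merged)
    {
        *two_rqs_merged = false;
        switch (type) {
        case ELEVATOR_BACK_MERGE:
            if (!sched_allow() || !bio_back_merge())
                return false;
            *two_rqs_merged = requests_now_abut(); /* attempt_back_merge() */
            return true;
        case ELEVATOR_FRONT_MERGE:
            if (!sched_allow() || !bio_front_merge())
                return false;
            *two_rqs_merged = requests_now_abut(); /* attempt_front_merge() */
            return true;
        case ELEVATOR_DISCARD_MERGE:
            return bio_discard_merge();
        default:
            return false;
        }
    }

    int main(void)
    {
        bool two = false;
        bool merged = sched_try_merge(ELEVATOR_BACK_MERGE, &two);
        printf("%d %d\n", merged, two); /* 1 0 */
        return 0;
    }
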