blk-merge.c: e700ac213a0f793fb4f83098413303e3dd080892 vs. badf7f64378796d460c79eb0f182fa7282eb65d5
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

--- 544 unchanged lines hidden ---

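/*
 * Discard requests have a segment limit of their own; all other requests
 * use the queue-wide maximum.
 */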
static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
        if (req_op(rq) == REQ_OP_DISCARD)
                return queue_max_discard_segments(rq->q);
        return queue_max_segments(rq->q);
}

[present only in badf7f64378796d460c79eb0f182fa7282eb65d5:]
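/*
 * Upper bound, in sectors, on growing @rq at @offset: passthrough requests
 * are capped only by the hardware limit; discard and secure erase skip the
 * chunk clamp; everything else must also stay within the current
 * chunk_sectors boundary.
 */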
static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
                                                  sector_t offset)
{
        struct request_queue *q = rq->q;

        if (blk_rq_is_passthrough(rq))
                return q->limits.max_hw_sectors;

        if (!q->limits.chunk_sectors ||
            req_op(rq) == REQ_OP_DISCARD ||
            req_op(rq) == REQ_OP_SECURE_ERASE)
                return blk_queue_get_max_sectors(q, req_op(rq));

        return min(blk_max_size_offset(q, offset, 0),
                   blk_queue_get_max_sectors(q, req_op(rq)));
}

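/*
 * Worked example of the chunk clamp (assuming a power-of-two chunk_sectors,
 * the case blk_max_size_offset() optimises for): with chunk_sectors == 256,
 * a request at offset 250 may grow by at most 256 - (250 & 255) == 6 sectors
 * before the per-operation limit is even considered.
 */

/*
 * ll_new_hw_segment() decides whether @bio may be added to @req as
 * @nr_phys_segs more physical segments without exceeding the queue's
 * segment limit.
 */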
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
                                    unsigned int nr_phys_segs)
{
        if (blk_integrity_merge_bio(req->q, req, bio) == false)
                goto no_merge;

        /* discard request merge won't add new segment */
        if (req_op(req) == REQ_OP_DISCARD

--- 144 unchanged lines hidden ---

        if (blk_discard_mergable(req))
                return ELEVATOR_DISCARD_MERGE;
        else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
                return ELEVATOR_BACK_MERGE;

        return ELEVATOR_NO_MERGE;
}

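/*
 * Back-merge arithmetic in the branch above: a request starting at sector
 * 2048 with blk_rq_sectors() == 8 ends at 2056, so a @next starting at
 * sector 2056 qualifies for ELEVATOR_BACK_MERGE.
 */
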
[present only in badf7f64378796d460c79eb0f182fa7282eb65d5:]
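/*
 * A write-same bio carries one replicated payload; two such bios are
 * mergeable only when that payload is identical, i.e. the same offset
 * within the same page.
 */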
static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
        if (bio_page(a) == bio_page(b) && bio_offset(a) == bio_offset(b))
                return true;
        return false;
}

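/*
 * attempt_merge() tries to combine two adjacent requests, @req and @next,
 * into one, returning NULL when either of them is not mergeable at all.
 */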
/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
                                     struct request *req, struct request *next)
{
        if (!rq_mergeable(req) || !rq_mergeable(next))

--- 408 unchanged lines hidden ---