blk-mq.c (1956ecf4c6ed4f7e4f728c360f378937d249bcd4) vs. blk-mq.c (dbc1625fc9deefb352f6ff26a575ae4b3ddef23a)
Unified rendering of the side-by-side diff: '-' lines appear only in 1956ecf4c6ed, '+' lines only in dbc1625fc9de, unmarked lines are common to both; the original per-file line ranges are kept in the hunk headers.
@@ 1956ecf4c6ed: lines 1-8 | dbc1625fc9de: lines 1-8 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Block multiqueue core code
  *
  * Copyright (C) 2013-2014 Jens Axboe
  * Copyright (C) 2013-2014 Christoph Hellwig
  */
 #include <linux/kernel.h>

--- 655 unchanged lines hidden ---

@@ 1956ecf4c6ed: lines 664-679 | dbc1625fc9de: lines 664-681 @@ blk_mq_start_request()
 	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
 }
 EXPORT_SYMBOL_GPL(blk_mq_request_started);

 void blk_mq_start_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;

+	blk_mq_sched_started_request(rq);
+
 	trace_block_rq_issue(q, rq);

 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
 		rq->io_start_time_ns = ktime_get_ns();
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 		rq->throtl_size = blk_rq_sectors(rq);
 #endif
 		rq->rq_flags |= RQF_STATS;
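Note: reassembled for readability, the dbc1625fc9de side of this hunk opens blk_mq_start_request() as below. Every statement is taken from the hunk itself (the hunk, and therefore this fragment, stops before the end of the function); only the comments are added here.

void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	/* present only on the dbc1625fc9de side of the diff */
	blk_mq_sched_started_request(rq);

	trace_block_rq_issue(q, rq);

	/* identical on both sides, merely renumbered (674-679 vs. 676-681) */
	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		rq->io_start_time_ns = ktime_get_ns();
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
		rq->throtl_size = blk_rq_sectors(rq);
#endif
		rq->rq_flags |= RQF_STATS;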

--- 1273 unchanged lines hidden ---

@@ 1956ecf4c6ed: lines 1953-1975 | dbc1625fc9de: lines 1955-1973 @@
 		return BLK_QC_T_NONE;

 	rq_qos_throttle(q, bio);

 	data.cmd_flags = bio->bi_opf;
 	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
 		rq_qos_cleanup(q, bio);
-
-		cookie = BLK_QC_T_NONE;
-		if (bio->bi_opf & REQ_NOWAIT_INLINE)
-			cookie = BLK_QC_T_EAGAIN;
-		else if (bio->bi_opf & REQ_NOWAIT)
+		if (bio->bi_opf & REQ_NOWAIT)
 			bio_wouldblock_error(bio);
-		return cookie;
+		return BLK_QC_T_NONE;
 	}

 	trace_block_getrq(q, bio, bio->bi_opf);

 	rq_qos_track(q, rq, bio);

 	cookie = request_to_qc_t(data.hctx, rq);
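Note: this hunk sits in the request-allocation step of the bio submission path (blk_mq_make_request() in kernels of this vintage is an assumption here; the function name lies outside the visible context). Reassembled from the hunk, the allocation-failure branch on the dbc1625fc9de side reads as below; every statement is taken from the diff, only the comment is added. On the 1956ecf4c6ed side the same branch could also hand back BLK_QC_T_EAGAIN for REQ_NOWAIT_INLINE bios.

	rq = blk_mq_get_request(q, bio, &data);
	if (unlikely(!rq)) {
		rq_qos_cleanup(q, bio);
		/* no REQ_NOWAIT_INLINE / BLK_QC_T_EAGAIN special case on this side:
		 * a REQ_NOWAIT bio is failed with bio_wouldblock_error() and the
		 * submission always reports BLK_QC_T_NONE */
		if (bio->bi_opf & REQ_NOWAIT)
			bio_wouldblock_error(bio);
		return BLK_QC_T_NONE;
	}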

--- 1436 unchanged lines hidden ---

@@ 1956ecf4c6ed: lines 3412-3431 | dbc1625fc9de: lines 3410-3428 @@

 	/*
 	 * This will be replaced with the stats tracking code, using
 	 * 'avg_completion_time / 2' as the pre-sleep target.
 	 */
 	kt = nsecs;

 	mode = HRTIMER_MODE_REL;
-	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
+	hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
 	hrtimer_set_expires(&hs.timer, kt);

-	hrtimer_init_sleeper(&hs, current);
 	do {
 		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
 			break;
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		hrtimer_start_expires(&hs.timer, mode);
 		if (hs.task)
 			io_schedule();
 		hrtimer_cancel(&hs.timer);
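Note: this hunk appears to be the hybrid-polling sleep helper (blk_mq_poll_hybrid_sleep() in kernels of this era is an assumption; the function name lies outside the visible context). The dbc1625fc9de side folds the two-step setup, hrtimer_init_on_stack() on the timer followed by hrtimer_init_sleeper(&hs, current), into a single hrtimer_init_sleeper_on_stack() call, while the sleep loop itself is untouched. Reassembled from the hunk (a truncated fragment; the loop's closing condition is in the hidden region), the dbc1625fc9de side reads:

	mode = HRTIMER_MODE_REL;
	/* one call now sets up both the on-stack timer and the sleeper that the
	 * separate hrtimer_init_sleeper(&hs, current) used to prepare */
	hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&hs.timer, kt);

	do {
		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		hrtimer_start_expires(&hs.timer, mode);
		if (hs.task)
			io_schedule();
		hrtimer_cancel(&hs.timer);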

--- 112 unchanged lines hidden ---