#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
			   bool wait);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
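
/*
 * A minimal usage sketch (illustrative only, not an API defined here):
 * submission-path code typically pairs the software and hardware context
 * lookups, e.g.
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 *	...
 *	blk_mq_put_ctx(ctx);
 *
 * q->mq_map[] holds one hardware queue index per possible CPU, so every
 * valid cpu argument resolves to exactly one hctx.
 */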

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;
	unsigned int shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);

#endif