#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
	struct blk_rq_stat	stat[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
			   bool wait);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list);

/*
 * CPU hotplug helpers
 */
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
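
/*
 * Illustrative sketch, not part of the API above: q->mq_map[] holds, for
 * each possible CPU, the index of that CPU's hardware context within
 * q->queue_hw_ctx[], so blk_mq_map_queue() below is a constant-time array
 * lookup. A caller resolving the hw queue for the current CPU would
 * typically pair it with the ctx helpers further down:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 *	...
 *	blk_mq_put_ctx(ctx);
 */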

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

/*
 * debugfs helpers
 */
#ifdef CONFIG_BLK_DEBUG_FS
int blk_mq_debugfs_register(struct request_queue *q, const char *name);
void blk_mq_debugfs_unregister(struct request_queue *q);
int blk_mq_debugfs_register_hctxs(struct request_queue *q);
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
#else
static inline int blk_mq_debugfs_register(struct request_queue *q,
					  const char *name)
{
	return 0;
}

static inline void blk_mq_debugfs_unregister(struct request_queue *q)
{
}

static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	return 0;
}

static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
}
#endif

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well,
 * for instance. For now this is hardcoded as-is. Note that we don't care
 * about preemption, since we know the ctx's are persistent. This does mean
 * that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
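
/*
 * Illustrative sketch, restating the pairing rule above: blk_mq_get_ctx()
 * calls get_cpu() and therefore disables preemption, so every call must be
 * matched by blk_mq_put_ctx() on the same path. Inserting a request into
 * the current CPU's software queue might look like:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	spin_lock(&ctx->lock);
 *	list_add_tail(&rq->queuelist, &ctx->rq_list);
 *	spin_unlock(&ctx->lock);
 *
 *	blk_mq_put_ctx(ctx);
 */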

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

/*
 * Scheduler-allocated ("internal") requests draw their tag from the
 * per-hctx sched_tags; all other requests use the driver tags.
 */
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

/*
 * Internal helpers for request allocation/init/free
 */
void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			struct request *rq, unsigned int op);
void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			     struct request *rq);
void blk_mq_finish_request(struct request *rq);
struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
				       unsigned int op);

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

#endif