/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;

struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
		unsigned long		state;		/* BLK_MQ_S_* flags */
	} ____cacheline_aligned_in_smp;

	struct delayed_work	run_work;
	cpumask_var_t		cpumask;
	int			next_cpu;
	int			next_cpu_batch;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	void			*sched_data;
	struct request_queue	*queue;
	struct blk_flush_queue	*fq;

	void			*driver_data;

	struct sbitmap		ctx_map;

	struct blk_mq_ctx	*dispatch_from;

	struct blk_mq_ctx	**ctxs;
	unsigned int		nr_ctx;

	wait_queue_entry_t	dispatch_wait;
	atomic_t		wait_index;

	struct blk_mq_tags	*tags;
	struct blk_mq_tags	*sched_tags;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	7
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		queue_num;

	atomic_t		nr_active;
	unsigned int		nr_expired;

	struct hlist_node	cpuhp_dead;
	struct kobject		kobj;

	unsigned long		poll_considered;
	unsigned long		poll_invoked;
	unsigned long		poll_success;

#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
#endif

	/* Must be the last member - see also blk_mq_hw_ctx_size(). */
	struct srcu_struct	srcu[0];
};

struct blk_mq_tag_set {
	unsigned int		*mq_map;
	const struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};

struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
		const struct blk_mq_queue_data *);
typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int);

typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
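
/*
 * Illustrative sketch (not part of this header): a minimal driver would
 * typically wire a few of the callbacks below into a blk_mq_ops table and
 * register it through a blk_mq_tag_set. All "mydrv_*" names are hypothetical.
 *
 *	static const struct blk_mq_ops mydrv_mq_ops = {
 *		.queue_rq	= mydrv_queue_rq,
 *		.complete	= mydrv_complete_rq,
 *		.init_request	= mydrv_init_request,
 *		.map_queues	= blk_mq_map_queues,
 *	};
 *
 *	set->ops = &mydrv_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = 64;
 *	set->cmd_size = sizeof(struct mydrv_cmd);
 *	if (!blk_mq_alloc_tag_set(set))
 *		q = blk_mq_init_queue(set);
 */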

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Reserve budget before queueing a request. Once .queue_rq has
	 * run, it is the driver's responsibility to release the reserved
	 * budget. The failure case of .get_budget must also be handled
	 * to avoid an I/O deadlock.
	 */
	get_budget_fn		*get_budget;
	put_budget_fn		*put_budget;

	/*
	 * Called on request timeout
	 */
	timeout_fn		*timeout;

	/*
	 * Called to poll for completion of a specific tag.
	 */
	poll_fn			*poll;

	softirq_done_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver-specific data.
	 *
	 * A tag greater than or equal to queue_depth is used to set up
	 * the flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
	/* Called from inside blk_get_request() */
	void (*initialize_rq_fn)(struct request *rq);

	map_queues_fn		*map_queues;

#ifdef CONFIG_BLK_DEBUG_FS
	/*
	 * Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};

enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_SG_MERGE	= 1 << 2,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,
	BLK_MQ_S_START_ON_RUN	= 3,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q);
int blk_mq_register_dev(struct device *, struct request_queue *);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* allocate internal/sched tag */
	BLK_MQ_REQ_INTERNAL	= (__force blk_mq_req_flags_t)(1 << 2),
	/* set RQF_PREEMPT */
	BLK_MQ_REQ_PREEMPT	= (__force blk_mq_req_flags_t)(1 << 3),
};

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
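
/*
 * Illustrative use (hypothetical "mydrv" driver): the 32-bit value returned
 * by blk_mq_unique_tag() packs the hardware queue index into the upper
 * BLK_MQ_UNIQUE_TAG_BITS and the per-queue tag into the lower bits, so a
 * driver that stashed it in a hardware completion entry can split it back
 * apart with the helpers above ("ce" is an assumed completion-entry struct):
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *
 *	ce->hwq = blk_mq_unique_tag_to_hwq(unique);
 *	ce->tag = blk_mq_unique_tag_to_tag(unique);
 */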

int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);

bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);
int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
		int (reinit_request)(void *, struct request *));

int blk_mq_map_queues(struct blk_mq_tag_set *set);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

/*
 * Driver command data is immediately after the request, so subtract the
 * request size to get back to the original request and add the request size
 * to get the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

#endif
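
/*
 * Illustrative sketch (hypothetical "mydrv" driver, not part of this header):
 * when the tag set was allocated with cmd_size = sizeof(struct mydrv_cmd),
 * each request carries a driver PDU directly behind it, reachable with
 * blk_mq_rq_to_pdu() from .queue_rq. struct mydrv_cmd and mydrv_submit() are
 * assumptions for the example.
 *
 *	static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					   const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *		struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		blk_mq_start_request(rq);
 *		mydrv_submit(hctx->driver_data, cmd, rq);
 *		return BLK_STS_OK;
 *	}
 */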