blk-mq.h (8be98d2f2a0a262f8bf8a0bc1fdf522b3c7aab17) vs. blk-mq.h (63064be150e4b1ba1e4af594ef5aa81adf21a52d)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

--- 33 unchanged lines hidden (view full) ---

void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
+void blk_mq_put_rq_ref(struct request *rq);
|
/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
-struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
-					unsigned int hctx_idx,
-					unsigned int nr_tags,
-					unsigned int reserved_tags,
-					unsigned int flags);
-int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
-		     unsigned int hctx_idx, unsigned int depth);
+struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
+				unsigned int hctx_idx, unsigned int depth);
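The newer revision folds the two-step tag-map/request allocation into one helper. The fragment below is a rough sketch of how a caller's allocation path changes, not kernel code: the example_* function names are made up, the use of set->reserved_tags and set->flags as arguments is illustrative, and the combined helper is assumed to return NULL on failure.

/* Illustrative sketch only, not part of blk-mq.h. */
static struct blk_mq_tags *example_alloc_tags_old(struct blk_mq_tag_set *set,
						  unsigned int hctx_idx,
						  unsigned int depth)
{
	struct blk_mq_tags *tags;

	/* Two steps: allocate the tag map first, then the request structures. */
	tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags,
				   set->flags);
	if (!tags)
		return NULL;

	if (blk_mq_alloc_rqs(set, tags, hctx_idx, depth)) {
		/* Unwind the first step by hand when the second one fails. */
		blk_mq_free_rq_map(tags, set->flags);
		return NULL;
	}
	return tags;
}

static struct blk_mq_tags *example_alloc_tags_new(struct blk_mq_tag_set *set,
						  unsigned int hctx_idx,
						  unsigned int depth)
{
	/* One call covers both allocations; assumed to return NULL on failure. */
	return blk_mq_alloc_map_and_rqs(set, hctx_idx, depth);
}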

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue);

--- 182 unchanged lines hidden (view full) ---

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

+bool blk_mq_get_driver_tag(struct request *rq);
+
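blk_mq_get_driver_tag() is newly declared here, next to the existing blk_mq_put_driver_tag() helper shown above. The sketch below only illustrates the usual pairing around a dispatch attempt (acquire a hardware tag before issuing, return it if issuing fails); example_issue_to_driver() is a hypothetical stand-in, not a kernel function.

/* Illustrative sketch only, not part of blk-mq.h. */
static bool example_dispatch_one(struct request *rq)
{
	/* Grab a hardware (driver) tag; bail out if none are available. */
	if (!blk_mq_get_driver_tag(rq))
		return false;

	if (example_issue_to_driver(rq) != 0) {
		/* The request was not issued, so give the tag back. */
		blk_mq_put_driver_tag(rq);
		return false;
	}
	return true;
}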
|
static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

--- 24 unchanged lines hidden (view full) ---

	 */
	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
		return current->plug;

	/* Zoned block device write operation case: do not plug the BIO */
	return NULL;
}

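The tail visible above belongs to the blk_mq_plug() helper (its head sits in the hidden context): write bios aimed at zoned block devices are never plugged, so their ordering is preserved. The sketch below is a hypothetical submission path built on that behaviour; the (q, bio) calling convention is assumed from the hidden declaration, and example_add_to_plug()/example_issue_directly() are made-up names.

/* Illustrative sketch only, not part of blk-mq.h. */
static void example_submit(struct request_queue *q, struct bio *bio,
			   struct request *rq)
{
	struct blk_plug *plug = blk_mq_plug(q, bio);

	if (plug)
		example_add_to_plug(plug, rq);	/* batch behind the plug */
	else
		example_issue_directly(rq);	/* zoned writes skip plugging */
}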
+/* Free all requests on the list */
+static inline void blk_mq_free_requests(struct list_head *list)
+{
+	while (!list_empty(list)) {
+		struct request *rq = list_entry_rq(list->next);
+
+		list_del_init(&rq->queuelist);
+		blk_mq_free_request(rq);
+	}
+}
+
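The new blk_mq_free_requests() helper drains and frees an entire list in one call. A minimal, hypothetical error-path sketch follows; apart from the helper itself it only uses the standard list machinery and the rq->queuelist member already referenced above.

/* Illustrative sketch only, not part of blk-mq.h. */
static void example_abort_batch(struct request *rqs[], int nr)
{
	LIST_HEAD(rq_list);
	int i;

	/* Collect the requests that can no longer be issued... */
	for (i = 0; i < nr; i++)
		list_add_tail(&rqs[i]->queuelist, &rq_list);

	/* ...and hand the whole list back in one call. */
	blk_mq_free_requests(&rq_list);
}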
|
/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

--- 35 unchanged lines hidden ---
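The comment above describes the fair-share policy for shared tags: when several queues are active on the same tag set, each is limited to roughly its share of the total depth. The body of hctx_may_queue() is hidden in this view, so the function below only illustrates that arithmetic; it is not the kernel implementation, and the per-user floor of four tags is an assumption.

/* Illustration of the fair-share idea only; not hctx_may_queue(). */
static bool example_may_queue(unsigned int total_depth, unsigned int users,
			      unsigned int my_active_requests)
{
	unsigned int share;

	if (users <= 1)
		return true;		/* no sharing, no limit */

	/* Round the per-user share up and keep a small floor (assumed). */
	share = max((total_depth + users - 1) / users, 4U);

	return my_active_requests < share;
}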