#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

struct blk_mq_tag_set;

/*
 * Per-CPU software queue context. The anonymous struct keeps the lock and
 * the pending request list together on their own cacheline.
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	unsigned int		last_tag ____cacheline_aligned_in_smp;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
void blk_mq_clone_flush_request(struct request *flush_rq,
		struct request *orig_rq);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);

/*
 * CPU hotplug helpers
 */
struct blk_mq_cpu_notifier;
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
			      int (*fn)(void *, unsigned long, unsigned int),
			      void *data);
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int index);

/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

/*
 * Basic implementation of a sparser bitmap, allowing the user to spread
 * the bits over more cachelines.
 */
struct blk_align_bitmap {
	unsigned long word;
	unsigned long depth;
} ____cacheline_aligned_in_smp;

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node as well,
 * for instance. For now this is hardcoded as-is. Note that we don't care
 * about preemption, since we know the ctx's are persistent. This does mean
 * that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

/* Pairs with blk_mq_get_ctx(): drops the preemption disable from get_cpu() */
static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}

struct blk_mq_alloc_data {
	/* input parameters */
	struct request_queue *q;
	gfp_t gfp;
	bool reserved;

	/* input & output parameters */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, gfp_t gfp, bool reserved,
		struct blk_mq_ctx *ctx,
		struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->gfp = gfp;
	data->reserved = reserved;
	data->ctx = ctx;
	data->hctx = hctx;
}

#endif
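
/*
 * Illustrative sketch, not part of the original header: a typical caller
 * pairs blk_mq_get_ctx() with blk_mq_put_ctx(), since blk_mq_get_ctx()
 * disables preemption via get_cpu(). The function name example_walk_ctx()
 * is hypothetical.
 *
 *	static void example_walk_ctx(struct request_queue *q)
 *	{
 *		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *		spin_lock(&ctx->lock);
 *		(inspect or splice ctx->rq_list while holding the lock)
 *		spin_unlock(&ctx->lock);
 *
 *		blk_mq_put_ctx(ctx);
 *	}
 */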
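
/*
 * Illustrative sketch, also not part of the original header: filling in
 * struct blk_mq_alloc_data before a request allocation attempt. The hctx
 * lookup through q->mq_ops->map_queue() is an assumption about the
 * caller's environment, and example_prep_alloc() is a hypothetical name.
 *
 *	static void example_prep_alloc(struct request_queue *q, gfp_t gfp)
 *	{
 *		struct blk_mq_alloc_data data;
 *		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
 *
 *		blk_mq_set_alloc_data(&data, q, gfp, false, ctx, hctx);
 *		(hand &data to the request/tag allocator; on return, data.ctx
 *		 and data.hctx may point at a different sw/hw queue pair)
 *		blk_mq_put_ctx(data.ctx);
 *	}
 */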