xref: /openbmc/linux/block/blk-mq.h (revision bc5aa3a0)
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

struct blk_mq_tag_set;

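/*
 * Per-CPU software queue context. Requests are first queued on the
 * submitting CPU's ctx and later dispatched to a hardware queue context
 * (struct blk_mq_hw_ctx).
 */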
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	unsigned int		last_tag ____cacheline_aligned_in_smp;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
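
/*
 * Illustrative sketch (not part of this header): keeping rq_completed on
 * its own cacheline means the completing CPU does not bounce the line that
 * the submitting CPU writes at dispatch time. Assuming the two slots split
 * sync vs. async requests, the counters would be bumped roughly like so:
 *
 *	ctx->rq_dispatched[rq_is_sync(rq)]++;	// at dispatch time
 *	...
 *	ctx->rq_completed[rq_is_sync(rq)]++;	// at completion time
 */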

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);

/*
 * CPU hotplug helpers
 */
struct blk_mq_cpu_notifier;
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
			      int (*fn)(void *, unsigned long, unsigned int),
			      void *data);
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
				   const struct cpumask *online_mask);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int index);

/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

/*
 * Basic implementation of a sparser bitmap, allowing the user to spread
 * the bits over more cachelines.
 */
struct blk_align_bitmap {
	unsigned long word;
	unsigned long depth;
} ____cacheline_aligned_in_smp;
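
/*
 * Illustrative sketch only: an array of blk_align_bitmap gives one
 * cacheline-aligned word per entry, so consecutive bit ranges land on
 * separate cachelines. The helper below is hypothetical (not part of
 * blk-mq), showing how a bit index could be split into an aligned word
 * plus an offset, with ->depth bounding the bits valid in that word:
 *
 *	static inline bool example_test_bit(struct blk_align_bitmap *map,
 *					    unsigned int bit)
 *	{
 *		struct blk_align_bitmap *bm = &map[bit / BITS_PER_LONG];
 *
 *		return bm->word & (1UL << (bit & (BITS_PER_LONG - 1)));
 *	}
 */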

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-CPU software queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
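
/*
 * Usage sketch: blk_mq_get_ctx() disables preemption via get_cpu(), so
 * every call must be paired with blk_mq_put_ctx() on the same path:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	// preemption disabled: ctx is this CPU's software queue
 *	...
 *	blk_mq_put_ctx(ctx);
 */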

struct blk_mq_alloc_data {
	/* input parameters */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameters */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, unsigned int flags,
		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->flags = flags;
	data->ctx = ctx;
	data->hctx = hctx;
}
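
/*
 * Usage sketch (illustrative): a caller fills in the queue and allocation
 * flags up front; real callers pass the ctx/hctx they have already looked
 * up, the NULLs below are just for the sketch:
 *
 *	struct blk_mq_alloc_data data;
 *
 *	blk_mq_set_alloc_data(&data, q, BLK_MQ_REQ_NOWAIT, NULL, NULL);
 */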

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}
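
/*
 * Illustrative sketch: run paths can use this predicate to skip hardware
 * queues that have no software contexts mapped or no tags allocated, e.g.:
 *
 *	if (blk_mq_hw_queue_mapped(hctx))
 *		blk_mq_run_hw_queue(hctx, true);
 */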

#endif /* INT_BLK_MQ_H */