#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;		/* CPU this software queue serves */
	unsigned int		index_hw;	/* index of this ctx in hctx->ctxs */

	unsigned int		last_tag ____cacheline_aligned_in_smp;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
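
/*
 * Typical access pattern (an illustrative sketch, not part of this header;
 * "rq" is assumed to be a struct request being queued): the per-ctx request
 * list is only manipulated under ctx->lock, e.g.
 *
 *	spin_lock(&ctx->lock);
 *	list_add_tail(&rq->queuelist, &ctx->rq_list);
 *	spin_unlock(&ctx->lock);
 */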

void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
void blk_mq_clone_flush_request(struct request *flush_rq,
		struct request *orig_rq);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);

/*
 * CPU hotplug helpers
 */
struct blk_mq_cpu_notifier;
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
			      int (*fn)(void *, unsigned long, unsigned int),
			      void *data);
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);
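
/*
 * Usage sketch (hypothetical caller-side code, for illustration only;
 * "example_cpu_notify" is a made-up name): a hardware context registers a
 * notifier so it can react to CPUs going away.  The callback receives the
 * opaque data pointer, the hotplug action (e.g. CPU_DEAD) and the CPU
 * number, and returns a NOTIFY_* code:
 *
 *	static int example_cpu_notify(void *data, unsigned long action,
 *				      unsigned int cpu)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	blk_mq_init_cpu_notifier(&hctx->cpu_notifier, example_cpu_notify,
 *				 hctx);
 *	blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
 */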

/*
 * CPU -> queue mappings
 */
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int index);
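
/*
 * The map produced here is indexed by CPU number and yields the hardware
 * queue index for that CPU, e.g. (a sketch; "set" is assumed to be a
 * populated tag set):
 *
 *	unsigned int *map = blk_mq_make_queue_map(set);
 *	unsigned int hw_idx = map[raw_smp_processor_id()];
 */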

/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

/*
 * Basic implementation of a sparser bitmap, allowing the user to spread
 * the bits over more cachelines.
 */
struct blk_align_bitmap {
	unsigned long word;
	unsigned long depth;
} ____cacheline_aligned_in_smp;
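
/*
 * Sketch of how the bits are addressed (assuming each map entry holds
 * "bits_per_word" valid bits in its word, with bits_per_word a power of
 * two, as hctx->ctx_map does): logical bit "nr" lands in one
 * cacheline-aligned word:
 *
 *	struct blk_align_bitmap *bm = &map[nr / bits_per_word];
 *
 *	set_bit(nr & (bits_per_word - 1), &bm->word);
 */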

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node as well,
 * for instance. For now this is hardcoded as-is. Note that we don't care
 * about preemption, since we know the ctx's are persistent. This does mean
 * that we can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	/* ctx is unused; this simply re-enables preemption from get_cpu() */
	put_cpu();
}
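
/*
 * Pairing sketch: blk_mq_get_ctx() disables preemption via get_cpu(), so
 * every call must be matched with blk_mq_put_ctx() on the same path:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	... use ctx; no sleeping allowed in between ...
 *
 *	blk_mq_put_ctx(ctx);
 */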

struct blk_mq_alloc_data {
	/* input parameters */
	struct request_queue *q;
	gfp_t gfp;
	bool reserved;

	/* input & output parameters */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, gfp_t gfp, bool reserved,
		struct blk_mq_ctx *ctx,
		struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->gfp = gfp;
	data->reserved = reserved;
	data->ctx = ctx;
	data->hctx = hctx;
}
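
/*
 * Initialization sketch (loosely mirroring the request allocation path in
 * blk-mq.c; __blk_mq_alloc_request() is static there and shown only for
 * illustration):
 *
 *	struct blk_mq_alloc_data alloc_data;
 *
 *	blk_mq_set_alloc_data(&alloc_data, q, GFP_KERNEL, false, ctx, hctx);
 *	rq = __blk_mq_alloc_request(&alloc_data, rw);
 */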

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}
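
/*
 * Usage sketch: callers use this as a guard so they don't poke a hardware
 * queue that has no software queues or tags attached, e.g.
 *
 *	if (blk_mq_hw_queue_mapped(hctx))
 *		blk_mq_run_hw_queue(hctx, true);
 */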

#endif