#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>

#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)

#ifndef CONFIG_BLK_CGROUP
/* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */
extern struct cgroup_subsys blkio_subsys;
#define blkio_subsys_id blkio_subsys.subsys_id
#endif

enum stat_type {
	/* Total time spent (in ns) between request dispatch to the driver and
	 * request completion for IOs done by this cgroup. This may not be
	 * accurate when NCQ is turned on. */
	BLKIO_STAT_SERVICE_TIME = 0,
	/* Total bytes transferred */
	BLKIO_STAT_SERVICE_BYTES,
	/* Total IOs serviced, post merge */
	BLKIO_STAT_SERVICED,
	/* Total time spent waiting in scheduler queue in ns */
	BLKIO_STAT_WAIT_TIME,
	/* Number of IOs merged */
	BLKIO_STAT_MERGED,
	/* Number of IOs queued up */
	BLKIO_STAT_QUEUED,
	/* All the single valued stats go below this */
	BLKIO_STAT_TIME,
	BLKIO_STAT_SECTORS,
#ifdef CONFIG_DEBUG_BLK_CGROUP
	BLKIO_STAT_AVG_QUEUE_SIZE,
	BLKIO_STAT_IDLE_TIME,
	BLKIO_STAT_EMPTY_TIME,
	BLKIO_STAT_GROUP_WAIT_TIME,
	BLKIO_STAT_DEQUEUE
#endif
};

enum stat_sub_type {
	BLKIO_STAT_READ = 0,
	BLKIO_STAT_WRITE,
	BLKIO_STAT_SYNC,
	BLKIO_STAT_ASYNC,
	BLKIO_STAT_TOTAL
};

/* blkg state flags */
enum blkg_state_flags {
	BLKG_waiting = 0,
	BLKG_idling,
	BLKG_empty,
};

struct blkio_cgroup {
	struct cgroup_subsys_state css;
	unsigned int weight;
	spinlock_t lock;
	struct hlist_head blkg_list;
	struct list_head policy_list; /* list of blkio_policy_node */
};

struct blkio_group_stats {
	/* total disk time and nr sectors dispatched by this group */
	uint64_t time;
	uint64_t sectors;
	uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Sum of the number of IOs queued across all samples */
	uint64_t avg_queue_size_sum;
	/* Count of samples taken for the average */
	uint64_t avg_queue_size_samples;
	/* How many times this group has been removed from the service tree */
	unsigned long dequeue;

	/* Total time this group spent waiting to be assigned a timeslice. */
	uint64_t group_wait_time;
	uint64_t start_group_wait_time;

	/* Time spent idling for this blkio_group */
	uint64_t idle_time;
	uint64_t start_idle_time;
	/*
	 * Total time during which this group had requests queued but did
	 * not contain the currently active queue.
	 */
	uint64_t empty_time;
	uint64_t start_empty_time;
	uint16_t flags;
#endif
};
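/*
 * Illustrative sketch (not part of this header): the per-type stats up to
 * and including BLKIO_STAT_QUEUED live in stat_arr[], indexed first by
 * enum stat_type and then by enum stat_sub_type, e.g.
 *
 *	uint64_t rd_wait = stats.stat_arr[BLKIO_STAT_WAIT_TIME][BLKIO_STAT_READ];
 *	uint64_t wr_ios  = stats.stat_arr[BLKIO_STAT_SERVICED][BLKIO_STAT_WRITE];
 *
 * Here "stats" is a hypothetical struct blkio_group_stats instance.
 * BLKIO_STAT_TOTAL only sizes the second dimension; totals are summed up
 * by whoever reads the stats out. The single-valued stats (time, sectors
 * and the debug stats) have their own fields instead.
 */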

struct blkio_group {
	/* An RCU-protected unique identifier for the group */
	void *key;
	struct hlist_node blkcg_node;
	unsigned short blkcg_id;
	/* Stored cgroup path */
	char path[128];
	/* The device MKDEV(major, minor) this group has been created for */
	dev_t dev;

	/* Serializes the stats in the case of reset/update */
	spinlock_t stats_lock;
	struct blkio_group_stats stats;
};
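/*
 * Illustrative sketch (not part of this header): a policy usually embeds a
 * blkio_group inside its own per-group structure and recovers it with
 * container_of(); "foo_group" and "foo_grp_of_blkg" are hypothetical names:
 *
 *	struct foo_group {
 *		struct blkio_group blkg;
 *		...
 *	};
 *
 *	static struct foo_group *foo_grp_of_blkg(struct blkio_group *blkg)
 *	{
 *		return container_of(blkg, struct foo_group, blkg);
 *	}
 *
 * The "key" is an opaque, RCU-protected pointer (typically the policy's
 * per-queue data) that is matched again in blkiocg_lookup_group().
 */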

struct blkio_policy_node {
	struct list_head node;
	dev_t dev;
	unsigned int weight;
};

extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
				     dev_t dev);

typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
typedef void (blkio_update_group_weight_fn) (struct blkio_group *blkg,
						unsigned int weight);

struct blkio_policy_ops {
	blkio_unlink_group_fn *blkio_unlink_group_fn;
	blkio_update_group_weight_fn *blkio_update_group_weight_fn;
};

struct blkio_policy_type {
	struct list_head list;
	struct blkio_policy_ops ops;
};

/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);
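/*
 * Illustrative registration sketch (not part of this header); the names
 * below are hypothetical:
 *
 *	static struct blkio_policy_type blkio_policy_foo = {
 *		.ops = {
 *			.blkio_unlink_group_fn		= foo_unlink_blkio_group,
 *			.blkio_update_group_weight_fn	= foo_update_group_weight,
 *		},
 *	};
 *
 *	blkio_policy_register(&blkio_policy_foo);	(e.g. on module init)
 *	blkio_policy_unregister(&blkio_policy_foo);	(e.g. on module exit)
 *
 * blk-cgroup then invokes the unlink callback when a cgroup's groups must be
 * torn down and the weight callback when the cgroup's weight changes.
 */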

static inline char *blkg_path(struct blkio_group *blkg)
{
	return blkg->path;
}

#else

struct blkio_group {
};

struct blkio_policy_type {
};

static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }

static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }

#endif

#define BLKIO_WEIGHT_MIN	100
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500
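/*
 * Illustrative sketch (not part of this header): per-cgroup and per-device
 * weights are expected to stay within this range, e.g.
 *
 *	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
 *		return -EINVAL;
 *	blkcg->weight = (unsigned int)val;
 */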

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				unsigned long dequeue);
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg);
void blkiocg_set_start_empty_time(struct blkio_group *blkg);

#define BLKG_FLAG_FNS(name)						\
static inline void blkio_mark_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags |= (1 << BLKG_##name);				\
}									\
static inline void blkio_clear_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags &= ~(1 << BLKG_##name);				\
}									\
static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
{									\
	return (stats->flags & (1 << BLKG_##name)) != 0;		\
}

BLKG_FLAG_FNS(waiting)
BLKG_FLAG_FNS(idling)
BLKG_FLAG_FNS(empty)
#undef BLKG_FLAG_FNS
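/*
 * Each BLKG_FLAG_FNS(name) expansion above provides blkio_mark_blkg_name(),
 * blkio_clear_blkg_name() and blkio_blkg_name(), all operating on
 * stats->flags. Illustrative use (sketch, not from this file):
 *
 *	if (!blkio_blkg_waiting(&blkg->stats)) {
 *		blkg->stats.start_group_wait_time = sched_clock();
 *		blkio_mark_blkg_waiting(&blkg->stats);
 *	}
 *
 * Callers are expected to hold blkg->stats_lock while updating these flags.
 */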
#else
static inline void blkiocg_update_avg_queue_size_stats(
						struct blkio_group *blkg) {}
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
						unsigned long dequeue) {}
static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{}
static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
#endif

#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev);
extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
						void *key);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
					unsigned long time);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
						bool direction, bool sync);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync);
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
		struct blkio_group *curr_blkg, bool direction, bool sync);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
					bool direction, bool sync);
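/*
 * Illustrative call sequence (sketch, not part of this header); error
 * handling is omitted and "fg", "curr_fg", "key", "dev", "rq", "rw" and
 * "sync" are hypothetical:
 *
 *	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
 *
 *	blkiocg_add_blkio_group(blkcg, &fg->blkg, key, dev);
 *	...
 *	blkiocg_update_io_add_stats(&fg->blkg, &curr_fg->blkg, rw, sync);
 *	blkiocg_update_dispatch_stats(&fg->blkg, blk_rq_bytes(rq), rw, sync);
 *	blkiocg_update_completion_stats(&fg->blkg, start_ns, io_start_ns,
 *					rw, sync);
 *	...
 *	blkiocg_del_blkio_group(&fg->blkg);
 *
 * The same "key" passed at add time is what blkiocg_lookup_group() matches
 * when walking blkcg->blkg_list.
 */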
#else
struct cgroup;
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }

static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev) {}

static inline int
blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }

static inline struct blkio_group *
blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
						unsigned long time) {}
static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync) {}
static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
		uint64_t start_time, uint64_t io_start_time, bool direction,
		bool sync) {}
static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
						bool direction, bool sync) {}
static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
		struct blkio_group *curr_blkg, bool direction, bool sync) {}
static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync) {}
#endif
#endif /* _BLK_CGROUP_H */