#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *	              Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t			id;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg-q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning, and pd_size can't be smaller than the size of pd.
 */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkcg_gq			*blkg;

	/* used during policy activation */
	struct list_head		alloc_node;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;
	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;
	/* reference count */
	int				refcnt;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};
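
/*
 * Example: a minimal policy implementation (illustrative sketch only;
 * example_pd, example_policy and the init logic below are hypothetical,
 * not part of this header).  Per-blkg private data embeds
 * struct blkg_policy_data first, and pd_size covers the whole area:
 *
 *	struct example_pd {
 *		struct blkg_policy_data pd;	(must be first)
 *		uint64_t nr_dispatched;		(policy-private state)
 *	};
 *
 *	static void example_pd_init(struct blkcg_gq *blkg)
 *	{
 *		struct example_pd *epd = container_of(
 *			blkg_to_pd(blkg, &example_policy),
 *			struct example_pd, pd);
 *
 *		epd->nr_dispatched = 0;
 *	}
 *
 *	static struct blkcg_policy example_policy = {
 *		.pd_size	= sizeof(struct example_pd),
 *		.pd_init_fn	= example_pd_init,
 *	};
 *
 * Such a policy would be announced with
 * blkcg_policy_register(&example_policy), which assigns ->plid, and
 * enabled on a queue with blkcg_activate_policy(q, &example_policy).
 */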

extern struct blkcg blkcg_root;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
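
/*
 * Sketch of how a policy's cftype read function might drive the helpers
 * above (modelled on in-tree users; example_print_rwstat and
 * example_policy are hypothetical):
 *
 *	static int example_print_rwstat(struct cgroup *cgrp,
 *					struct cftype *cft,
 *					struct seq_file *sf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *
 *		blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat,
 *				  &example_policy, cft->private, true);
 *		return 0;
 *	}
 *
 * blkg_prfill_stat() and blkg_prfill_rwstat() interpret @off as the byte
 * offset of the blkg_stat/blkg_rwstat inside the policy's private data,
 * so cft->private is typically set to an offsetof() value.
 */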

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
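
/*
 * Typical blkg_conf_prep()/blkg_conf_finish() pairing in a policy's
 * configuration write handler (sketch; example_policy and the limit
 * update are hypothetical):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	(ctx.blkg is the target blkg, ctx.v the value parsed from @input)
 *	example_update_limit(blkg_to_pd(ctx.blkg, &example_policy), ctx.v);
 *
 *	blkg_conf_finish(&ctx);
 *
 * A successful blkg_conf_prep() leaves locks and references held, so
 * every such call must be paired with blkg_conf_finish().
 */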

static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkcg, css);
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkcg, css);
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkcg, css);
	return task_blkcg(current);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	int ret;

	rcu_read_lock();
	ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	rcu_read_unlock();
	if (ret)
		strncpy(buf, "<unavailable>", buflen);
	return ret;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkcg_gq *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}
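
/*
 * The request_list helpers above pair up across a request's lifetime
 * (sketch; alloc_request_from() and free_request() stand in for the
 * actual allocation/freeing code in blk-core.c):
 *
 *	(allocation path, under queue_lock)
 *	rl = blk_get_rl(q, bio);
 *	rq = alloc_request_from(rl);
 *	blk_rq_set_rl(rq, rl);
 *
 *	(freeing path, under queue_lock)
 *	rl = blk_rq_rl(rq);
 *	free_request(rq);
 *	blk_put_rl(rl);
 */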

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 * @rl: request_list cursor
 * @q: request_queue to iterate through
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
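
/*
 * Example (sketch): waking up all sync waiters on every request_list of
 * a queue, as a drain path might do:
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		wake_up_all(&rl->wait[BLK_RW_SYNC]);
 */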

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}
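
/*
 * Intended blkg_stat usage pattern (sketch; epd is a hypothetical
 * policy-private area containing a struct blkg_stat named time):
 *
 *	(updater, serialized by the caller, e.g. under queue_lock)
 *	blkg_stat_add(&epd->time, jiffies - start);
 *
 *	(reader, safe without synchronization)
 *	total = blkg_stat_read(&epd->time);
 */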

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_sum - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}
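
/*
 * Sketch tying the rwstat helpers together (epd->serviced is a
 * hypothetical struct blkg_rwstat in policy-private data):
 *
 *	(account one completed request, serialized by the caller)
 *	blkg_rwstat_add(&epd->serviced, rq->cmd_flags, 1);
 *
 *	(consistent snapshot and total, safe from any context)
 *	struct blkg_rwstat snap = blkg_rwstat_read(&epd->serviced);
 *	uint64_t total = blkg_rwstat_sum(&epd->serviced);
 */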

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

#else	/* CONFIG_BLK_CGROUP */

struct cgroup;
struct blkcg;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline int blkg_path(struct blkcg_gq *blkg, char *buf,
			    int buflen) { return -ENOSYS; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */