#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t			id;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
	unsigned int			cfq_leaf_weight;
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn	*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};
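/*
 * Illustrative sketch of the pd embedding rule above.  The names
 * "example_pd", "example_pd_init", "example_policy" and
 * "blkg_to_example" are hypothetical, not part of this header: a policy
 * places struct blkg_policy_data first in its private data, sizes the
 * allocation via pd_size, and recovers its own type with container_of().
 *
 *	struct example_pd {
 *		struct blkg_policy_data	pd;	// must be the first member
 *		uint64_t		bytes_dispatched;
 *	};
 *
 *	static struct example_pd *blkg_to_example(struct blkcg_gq *blkg,
 *						  struct blkcg_policy *pol)
 *	{
 *		// safe because pd sits at offset 0 of example_pd
 *		return container_of(blkg_to_pd(blkg, pol),
 *				    struct example_pd, pd);
 *	}
 *
 *	static struct blkcg_policy example_policy = {
 *		.pd_size	= sizeof(struct example_pd),
 *		.pd_init_fn	= example_pd_init,
 *	};
 *
 * blkcg_policy_register() assigns ->plid, so the initializer leaves it
 * unset; the policy is then enabled per-queue with blkcg_activate_policy().
 */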
extern struct blkcg blkcg_root;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
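/*
 * Usage sketch for the config helpers above ("example_policy" is the
 * hypothetical policy from the earlier sketch).  blkg_conf_prep() parses
 * "MAJ:MIN VAL" style input and, on success, returns with the target
 * blkg looked up and pinned; it must be paired with blkg_conf_finish(),
 * which releases everything prep acquired:
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	// ctx.blkg identifies the device's blkg, ctx.v holds the value;
 *	// ...apply ctx.v to blkg_to_pd(ctx.blkg, &example_policy)...
 *
 *	blkg_conf_finish(&ctx);
 */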
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and is the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and is the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
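/*
 * Usage sketch for the iterators above: walk every online descendant of
 * @blkg under the RCU read lock, as the locking rules require.
 * "example_policy" remains the hypothetical policy from the earlier
 * sketches.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, blkg) {
 *		struct blkg_policy_data *pd =
 *			blkg_to_pd(d_blkg, &example_policy);
 *
 *		// d_blkg starts at blkg itself, then visits descendants
 *		// ...inspect or update pd here...
 *	}
 *	rcu_read_unlock();
 */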
/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 * @rl: request_list cursor
 * @q: request_queue of interest
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}
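/*
 * Sketch of the intended split for blkg_stat ("epd" and its "serviced"
 * field are hypothetical policy private data, as in the earlier
 * sketches): updates are serialized by the caller, e.g. under the queue
 * lock, while readers may run locklessly because blkg_stat_read()
 * retries across u64_stats sequence windows.
 *
 *	// update side, caller provides synchronization
 *	blkg_stat_add(&epd->serviced, 1);
 *
 *	// read side, e.g. from a cgroup file's seq_file show method
 *	u64 nr = blkg_stat_read(&epd->serviced);
 */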
/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}

static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it.  This function can
 * be called without synchronization and takes care of u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}
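/*
 * Sketch for blkg_rwstat (hypothetical "service_bytes" field in a
 * policy's private data "epd"): @rw is the bio's rw flags, whose
 * REQ_WRITE and REQ_SYNC bits pick the counters, and totals fold the
 * read and write counts back together.  The bio field names assume an
 * immutable-biovec era kernel.
 *
 *	blkg_rwstat_add(&epd->service_bytes, bio->bi_rw,
 *			bio->bi_iter.bi_size);
 *	...
 *	u64 bytes = blkg_rwstat_total(&epd->service_bytes);
 */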
/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}

#else	/* CONFIG_BLK_CGROUP */

struct cgroup;
struct blkcg;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

/* stubs; signatures kept in sync with the CONFIG_BLK_CGROUP versions */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	if (buflen)
		buf[0] = '\0';
	return 0;
}
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */