/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_PRIVATE_H
#define _BLK_CGROUP_PRIVATE_H
/*
 * block cgroup private header
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *	              Nauman Rafique <nauman@google.com>
 */

#include <linux/blk-cgroup.h>
#include <linux/cgroup.h>
#include <linux/kthread.h>
#include <linux/blk-mq.h>

struct blkcg_gq;
struct blkg_policy_data;

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
	union {
		struct work_struct	async_bio_work;
		struct work_struct	free_work;
	};

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;
};

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;
	refcount_t			online_pin;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_BLK_CGROUP_FC_APPID
	char				fc_app_id[FC_APPID_LEN];
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
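
/*
 * Illustrative sketch (not part of this header): a policy's pd_alloc_fn()
 * typically returns a larger, policy-private structure which embeds
 * blkg_policy_data at the beginning, and the policy recovers its own type
 * from the generic pointer with container_of().  The names "example_pd"
 * and "pd_to_example_pd" below are hypothetical.
 */
struct example_pd {
	struct blkg_policy_data	pd;	/* embedded first, as described above */
	u64			bytes_charged;
};

static inline struct example_pd *pd_to_example_pd(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct example_pd, pd) : NULL;
}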

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
				struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
				struct seq_file *s);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
int blkcg_init_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	struct block_device		*bdev;
	struct blkcg_gq			*blkg;
	char				*body;
};

struct block_device *blkcg_conf_open_bdev(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
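
/*
 * Illustrative sketch (hypothetical, not part of this header): per-device
 * configuration writes are usually parsed with blkg_conf_prep(), which
 * resolves the "MAJ:MIN ..." prefix of @input to a blkg for the given
 * policy, and must be paired with blkg_conf_finish().  "example_policy"
 * stands for any registered blkcg_policy; a real handler would also parse
 * ctx.body and update its per-blkg data before finishing.
 */
static inline int example_conf_write(struct blkcg *blkcg,
				     const struct blkcg_policy *example_policy,
				     char *input)
{
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, example_policy, input, &ctx);
	if (ret)
		return ret;

	/* ctx.blkg and ctx.body remain valid until blkg_conf_finish() */

	blkg_conf_finish(&ctx);
	return 0;
}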

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: the bio being submitted
 * @return: true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg. The idea is we do bio_blkcg_css() to look up the actual context for
 * the bio and attach the appropriate blkg to the bio. Then we call this helper
 * and if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level.  See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}
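
/*
 * Illustrative sketch (hypothetical): the usual lookup pattern combines
 * blkg_lookup(), which must run under the RCU read lock, with blkg_to_pd()
 * to reach the policy's private data.  "example_policy" stands for any
 * registered blkcg_policy.
 */
static inline bool example_blkg_has_pd(struct blkcg *blkcg,
				       struct request_queue *q,
				       struct blkcg_policy *example_policy)
{
	struct blkcg_gq *blkg;
	bool ret = false;

	rcu_read_lock();
	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		ret = blkg_to_pd(blkg, example_policy) != NULL;
	rcu_read_unlock();

	return ret;
}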

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
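
/*
 * Illustrative sketch (hypothetical): walking a blkg's subtree with the
 * iterators above.  The walk itself only requires the RCU read lock;
 * holding the blkcg or queue lock in addition guarantees that all and
 * only online blkgs are visited.
 */
static inline unsigned int example_count_online_blkgs(struct blkcg_gq *p_blkg)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *d_blkg;
	unsigned int nr = 0;

	rcu_read_lock();
	blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) {
		if (d_blkg->online)
			nr++;
	}
	rcu_read_unlock();

	return nr;
}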

bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay().  Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism. See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}
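
/*
 * Illustrative sketch (hypothetical): blkcg_use_delay()/blkcg_unuse_delay()
 * maintain a reference-counted delay that is decayed elsewhere, while
 * blkcg_set_delay() pins an explicit delay that stays in force until
 * blkcg_clear_delay().  The helper below shows the latter mode; the 10ms
 * value is arbitrary.
 */
static inline void example_throttle_blkg(struct blkcg_gq *blkg, bool throttle)
{
	if (throttle)
		blkcg_set_delay(blkg, 10ULL * 1000 * 1000);	/* 10ms in ns */
	else
		blkcg_clear_delay(blkg);
}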

/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root should
 * match.  The latter is necessary as we don't want to throttle e.g. a metadata
 * update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
#else	/* CONFIG_BLK_CGROUP */

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

struct blkcg {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */

#endif /* _BLK_CGROUP_PRIVATE_H */