/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_PRIVATE_H
#define _BLK_CGROUP_PRIVATE_H
/*
 * block cgroup private header
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/blk-cgroup.h>
#include <linux/blk-mq.h>

struct blkcg_gq;
struct blkg_policy_data;

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
	union {
		struct work_struct	async_bio_work;
		struct work_struct	free_work;
	};

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;
};

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;
	refcount_t			online_pin;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_BLK_CGROUP_FC_APPID
	char				fc_app_id[FC_APPID_LEN];
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
 * area by allocating a larger structure which embeds blkg_policy_data at the
 * beginning; see the sketch below.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
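
/*
 * Illustrative sketch (hypothetical policy code, not part of this header):
 * a policy embeds blkg_policy_data at the start of its own structure and
 * converts back with container_of().
 *
 *	struct my_policy_pd {
 *		struct blkg_policy_data	pd;	// must come first
 *		u64			budget;	// policy-private state
 *	};
 *
 *	static struct my_policy_pd *pd_to_mypd(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct my_policy_pd, pd) : NULL;
 *	}
 */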

/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
				struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
				struct seq_file *s);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn	*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
int blkcg_init_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	struct block_device		*bdev;
	struct blkcg_gq			*blkg;
	char				*body;
};

struct block_device *blkcg_conf_open_bdev(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
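
/*
 * Illustrative sketch (hypothetical policy code): handling a "MAJ:MIN value"
 * cgroup file write with blkg_conf_prep()/blkg_conf_finish().  On success,
 * blkg_conf_prep() returns with the blkg pinned and ctx.body pointing at the
 * remainder of the input; blkg_conf_finish() undoes the pinning.
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &my_policy, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	ret = my_apply_config(ctx.blkg, ctx.body);	// hypothetical helper
 *	blkg_conf_finish(&ctx);
 *	return ret;
 */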

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget
 * logic to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: bio of interest
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg_css() to look up the actual context for
 * the bio and attach the appropriate blkg to the bio.  Then we call this helper
 * and if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 *
 * Return: %true if this bio needs to be submitted with the root blkg context.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}
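
/*
 * Illustrative sketch (hypothetical caller): a lookup result is only stable
 * while the RCU read lock is held, unless a reference is taken with
 * blkg_tryget() (defined below).
 *
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg && blkg_tryget(blkg)) {
 *		// safe to use blkg past rcu_read_unlock()
 *		blkg_put(blkg);
 *	}
 *	rcu_read_unlock();
 */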

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level.  See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not
 * zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
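
/*
 * Illustrative sketch (hypothetical caller): propagating a setting down the
 * hierarchy with a pre-order walk.  The walk itself must run under the RCU
 * read lock.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, parent_blkg)
 *		my_update_limit(blkg);		// hypothetical helper
 *	rcu_read_unlock();
 */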

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay().  Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism.  See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}
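
/*
 * Illustrative sketch (hypothetical policy code): using the fixed-delay
 * variant.  blkcg_set_delay() and blkcg_clear_delay() must be paired and
 * must not be mixed with blkcg_[un]use_delay()/blkcg_add_delay().
 *
 *	if (over_limit)
 *		blkcg_set_delay(blkg, NSEC_PER_MSEC);	// throttle by 1ms
 *	else
 *		blkcg_clear_delay(blkg);
 */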

/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root should
 * match.  The latter is necessary as we don't want to throttle e.g. a metadata
 * update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
#else	/* CONFIG_BLK_CGROUP */

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

struct blkcg {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */

#endif /* _BLK_CGROUP_PRIVATE_H */