/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_PRIVATE_H
#define _BLK_CGROUP_PRIVATE_H
/*
 * block cgroup private header
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *	              Nauman Rafique <nauman@google.com>
 */

#include <linux/blk-cgroup.h>
#include <linux/blk-mq.h>

struct blkcg_gq;
struct blkg_policy_data;

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

#ifdef CONFIG_BLK_CGROUP
struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;
	refcount_t			online_pin;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_BLK_CGROUP_FC_APPID
	char				fc_app_id[FC_APPID_LEN];
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate private data
 * area by allocating larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated to it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate private data area by allocating larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};
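
/*
 * Illustrative sketch (not part of this header's API): a policy would
 * typically embed blkg_policy_data at the start of its own private struct
 * and recover it with container_of().  The "mypol" names below are made up
 * for illustration.
 */
struct mypol_blkg_data {
	struct blkg_policy_data	pd;		/* must be the first member */
	u64			ios_issued;	/* policy-private state follows */
};

static inline struct mypol_blkg_data *pd_to_mypol(struct blkg_policy_data *pd)
{
	/* valid because pd is embedded at the start of mypol_blkg_data */
	return pd ? container_of(pd, struct mypol_blkg_data, pd) : NULL;
}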

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
				struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
				struct seq_file *s);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
int blkcg_init_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	struct block_device		*bdev;
	struct blkcg_gq			*blkg;
	char				*body;
};

struct block_device *blkcg_conf_open_bdev(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
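
/*
 * Illustrative sketch of the blkg_conf_prep()/blkg_conf_finish() pairing a
 * policy's cgroup file write handler would use ("mypol_policy" and the
 * apply step are hypothetical):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &mypol_policy, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	// ctx.blkg is looked up (or created) and pinned, the queue_lock is
 *	// held, and ctx.body points at the rest of the input string
 *	mypol_apply(blkg_to_pd(ctx.blkg, &mypol_policy), ctx.body);
 *	blkg_conf_finish(&ctx);	// drops the lock and the references
 */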

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: bio of interest
 *
 * Return: %true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
 * bio and attach the appropriate blkg to the bio.  Then we call this helper
 * and if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level.  See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}
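
/*
 * Illustrative sketch: blkg_lookup() is only valid under the RCU read
 * lock, so a caller that wants to keep using the blkg afterwards would
 * pin it with blkg_tryget() (defined below) before dropping the lock:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg && !blkg_tryget(blkg))
 *		blkg = NULL;	// raced with blkg destruction
 *	rcu_read_unlock();
 *	if (blkg) {
 *		// ...use blkg...
 *		blkg_put(blkg);
 *	}
 */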

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not
 * zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
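
/*
 * Illustrative sketch: a policy propagating a configuration change down
 * the hierarchy (the "mypol" update step is hypothetical).  The walk must
 * run under the RCU read lock, per the rules above:
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, parent_blkg)
 *		mypol_update_limits(blkg_to_pd(blkg, &mypol_policy));
 *	rcu_read_unlock();
 */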

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay().  Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism.  See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}
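
/*
 * Illustrative sketch: blkcg_use_delay()/blkcg_unuse_delay() form a
 * balanced, decaying scheme, while blkcg_set_delay()/blkcg_clear_delay()
 * form a separate fixed scheme; the two must not be mixed on one blkg:
 *
 *	// decaying scheme: charge delay while throttling is in effect
 *	blkcg_use_delay(blkg);
 *	blkcg_add_delay(blkg, now, delta);
 *	// ...
 *	blkcg_unuse_delay(blkg);
 *
 *	// fixed scheme: explicit arm/disarm, delay is not decayed
 *	blkcg_set_delay(blkg, delay_nsec);
 *	// ...
 *	blkcg_clear_delay(blkg);
 */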

/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root
 * should match.  The latter is necessary as we don't want to throttle e.g.
 * a metadata update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);

#else	/* CONFIG_BLK_CGROUP */

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{ return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */

#endif /* _BLK_CGROUP_PRIVATE_H */