/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_PRIVATE_H
#define _BLK_CGROUP_PRIVATE_H
/*
 * block cgroup private header
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/blk-cgroup.h>
#include <linux/cgroup.h>
#include <linux/kthread.h>
#include <linux/blk-mq.h>
#include <linux/llist.h>

struct blkcg_gq;
struct blkg_policy_data;

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkg_iostat {
	u64 bytes[BLKG_IOSTAT_NR];
	u64 ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync sync;
	struct blkcg_gq *blkg;
	struct llist_node lnode;
	int lqueued;	/* queued in llist */
	struct blkg_iostat cur;
	struct blkg_iostat last;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue *q;
	struct list_head q_node;
	struct hlist_node blkcg_node;
	struct blkcg *blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq *parent;

	/* reference count */
	struct percpu_ref refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool online;

	struct blkg_iostat_set __percpu *iostat_cpu;
	struct blkg_iostat_set iostat;

	struct blkg_policy_data *pd[BLKCG_MAX_POLS];

	spinlock_t async_bio_lock;
	struct bio_list async_bios;
	union {
		struct work_struct async_bio_work;
		struct work_struct free_work;
	};

	atomic_t use_delay;
	atomic64_t delay_nsec;
	atomic64_t delay_start;
	u64 last_delay;
	int last_use;

	struct rcu_head rcu_head;
};

struct blkcg {
	struct cgroup_subsys_state css;
	spinlock_t lock;
	refcount_t online_pin;

	struct radix_tree_root blkg_tree;
	struct blkcg_gq __rcu *blkg_hint;
	struct hlist_head blkg_list;

	struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];

	struct list_head all_blkcgs_node;

	/*
	 * List of updated percpu blkg_iostat_set's since the last flush.
	 */
	struct llist_head __percpu *lhead;

#ifdef CONFIG_BLK_CGROUP_FC_APPID
	char fc_app_id[FC_APPID_LEN];
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
#endif
};

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning.
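 *
 * For illustration only, a hypothetical policy "foo" (not part of this
 * header) could embed blkg_policy_data at the start of its own per-blkg
 * structure and convert back with container_of():
 *
 *	struct foo_blkg_data {
 *		struct blkg_policy_data pd;	// must be the first member
 *		u64 budget;
 *	};
 *
 *	static struct foo_blkg_data *pd_to_foo(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_blkg_data, pd) : NULL;
 *	}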
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq *blkg;
	int plid;
	bool online;
};

/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg *blkcg;
	int plid;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(struct gendisk *disk,
		struct blkcg *blkcg, gfp_t gfp);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
		struct seq_file *s);

struct blkcg_policy {
	int plid;
	/* cgroup files for the policy */
	struct cftype *dfl_cftypes;
	struct cftype *legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
	blkcg_pol_free_cpd_fn *cpd_free_fn;

	blkcg_pol_alloc_pd_fn *pd_alloc_fn;
	blkcg_pol_init_pd_fn *pd_init_fn;
	blkcg_pol_online_pd_fn *pd_online_fn;
	blkcg_pol_offline_pd_fn *pd_offline_fn;
	blkcg_pol_free_pd_fn *pd_free_fn;
	blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn *pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;

int blkcg_init_disk(struct gendisk *disk);
void blkcg_exit_disk(struct gendisk *disk);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct gendisk *disk,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	struct block_device *bdev;
	struct blkcg_gq *blkg;
	char *body;
};

struct block_device *blkcg_conf_open_bdev(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
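
/*
 * Illustrative sketch only: a policy's cftype write handler typically
 * brackets its work with blkg_conf_prep() and blkg_conf_finish() along
 * these lines ("foo_policy" is hypothetical, error handling trimmed):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &foo_policy, input, &ctx);
 *	if (ret)
 *		return ret;
 *	// ctx.blkg is the blkg for the (cgroup, bdev) pair named in
 *	// @input; parse ctx.body and update per-blkg policy state here.
 *	blkg_conf_finish(&ctx);
 */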

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: bio of interest
 *
 * Returns true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg_css() to look up the actual context for
 * the bio and attach the appropriate blkg to the bio.  Then we call this helper
 * and if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.
 *
 * Must be called in an RCU critical section.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q != q)
		blkg = NULL;
	return blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
 * of freeing this blkg, so we can only use it if the refcnt is not zero.
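 *
 * A typical lookup sequence (illustrative sketch only; blkg_tryget()
 * tolerates a NULL @blkg):
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg_tryget(blkg)) {
 *		// the reference keeps @blkg alive past the RCU section
 *		...
 *		blkg_put(blkg);
 *	}
 *	rcu_read_unlock();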
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))

bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old && !atomic_try_cmpxchg(&blkg->use_delay, &old, old - 1))
		;

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}
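
/*
 * Illustrative sketch only: the counting use_delay mode above is driven
 * roughly like this (as blk-iolatency does; pairing is up to the policy):
 *
 *	blkcg_use_delay(blkg);			// use_delay++, mark congested
 *	blkcg_add_delay(blkg, now, delta);	// accumulate delay_nsec
 *	...
 *	blkcg_unuse_delay(blkg);		// drop this contribution again
 *
 * In this mode delay_nsec is decayed over time.  The absolute mode set up
 * by blkcg_set_delay() below is not decayed and must not be mixed with the
 * counting helpers.
 */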

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay().  Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_try_cmpxchg(&blkg->use_delay, &old, -1))
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism.  See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_try_cmpxchg(&blkg->use_delay, &old, 0))
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}

/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root should
 * match.  The latter is necessary as we don't want to throttle e.g. a metadata
 * update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
#else	/* CONFIG_BLK_CGROUP */

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

struct blkcg {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q) { return NULL; }
static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
static inline void blkcg_exit_disk(struct gendisk *disk) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct gendisk *disk,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct gendisk *disk,
					   const struct blkcg_policy *pol) { }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline int blkg_path(struct blkcg_gq *blkg, char *buf,
			    int buflen) { return 0; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */

#endif /* _BLK_CGROUP_PRIVATE_H */