1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * cgroups support for the BFQ I/O scheduler.
4 */
5 #include <linux/module.h>
6 #include <linux/slab.h>
7 #include <linux/blkdev.h>
8 #include <linux/cgroup.h>
9 #include <linux/ktime.h>
10 #include <linux/rbtree.h>
11 #include <linux/ioprio.h>
12 #include <linux/sbitmap.h>
13 #include <linux/delay.h>
14
15 #include "elevator.h"
16 #include "bfq-iosched.h"
17
18 #ifdef CONFIG_BFQ_CGROUP_DEBUG
19 static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
20 {
21 int ret;
22
23 ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
24 if (ret)
25 return ret;
26
27 atomic64_set(&stat->aux_cnt, 0);
28 return 0;
29 }
30
31 static void bfq_stat_exit(struct bfq_stat *stat)
32 {
33 percpu_counter_destroy(&stat->cpu_cnt);
34 }
35
36 /**
37 * bfq_stat_add - add a value to a bfq_stat
38 * @stat: target bfq_stat
39 * @val: value to add
40 *
41 * Add @val to @stat. The caller must ensure that IRQs on the same CPU
42 * don't re-enter this function for the same counter.
43 */
44 static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
45 {
46 percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
47 }
48
49 /**
50 * bfq_stat_read - read the current value of a bfq_stat
51 * @stat: bfq_stat to read
52 */
53 static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
54 {
55 return percpu_counter_sum_positive(&stat->cpu_cnt);
56 }
57
58 /**
59 * bfq_stat_reset - reset a bfq_stat
60 * @stat: bfq_stat to reset
61 */
62 static inline void bfq_stat_reset(struct bfq_stat *stat)
63 {
64 percpu_counter_set(&stat->cpu_cnt, 0);
65 atomic64_set(&stat->aux_cnt, 0);
66 }
67
68 /**
69 * bfq_stat_add_aux - add a bfq_stat into another's aux count
70 * @to: the destination bfq_stat
71 * @from: the source
72 *
73 * Add @from's count including the aux one to @to's aux count.
74 */
75 static inline void bfq_stat_add_aux(struct bfq_stat *to,
76 struct bfq_stat *from)
77 {
78 atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
79 &to->aux_cnt);
80 }
81
82 /**
83 * blkg_prfill_stat - prfill callback for bfq_stat
84 * @sf: seq_file to print to
85 * @pd: policy private data of interest
86 * @off: offset to the bfq_stat in @pd
87 *
88 * prfill callback for printing a bfq_stat.
89 */
90 static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
91 int off)
92 {
93 return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
94 }
95
96 /* bfqg stats flags */
97 enum bfqg_stats_flags {
98 BFQG_stats_waiting = 0,
99 BFQG_stats_idling,
100 BFQG_stats_empty,
101 };
102
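/*
 * BFQG_FLAG_FNS(name) generates three helpers for each flag above:
 * bfqg_stats_mark_<name>() sets the bit, bfqg_stats_clear_<name>()
 * clears it, and bfqg_stats_<name>() tests it in stats->flags.
 */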
103 #define BFQG_FLAG_FNS(name) \
104 static void bfqg_stats_mark_##name(struct bfqg_stats *stats) \
105 { \
106 stats->flags |= (1 << BFQG_stats_##name); \
107 } \
108 static void bfqg_stats_clear_##name(struct bfqg_stats *stats) \
109 { \
110 stats->flags &= ~(1 << BFQG_stats_##name); \
111 } \
112 static int bfqg_stats_##name(struct bfqg_stats *stats) \
113 { \
114 return (stats->flags & (1 << BFQG_stats_##name)) != 0; \
115 } \
116
117 BFQG_FLAG_FNS(waiting)
118 BFQG_FLAG_FNS(idling)
119 BFQG_FLAG_FNS(empty)
120 #undef BFQG_FLAG_FNS
121
122 /* This should be called with the scheduler lock held. */
123 static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
124 {
125 u64 now;
126
127 if (!bfqg_stats_waiting(stats))
128 return;
129
130 now = ktime_get_ns();
131 if (now > stats->start_group_wait_time)
132 bfq_stat_add(&stats->group_wait_time,
133 now - stats->start_group_wait_time);
134 bfqg_stats_clear_waiting(stats);
135 }
136
137 /* This should be called with the scheduler lock held. */
138 static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
139 struct bfq_group *curr_bfqg)
140 {
141 struct bfqg_stats *stats = &bfqg->stats;
142
143 if (bfqg_stats_waiting(stats))
144 return;
145 if (bfqg == curr_bfqg)
146 return;
147 stats->start_group_wait_time = ktime_get_ns();
148 bfqg_stats_mark_waiting(stats);
149 }
150
151 /* This should be called with the scheduler lock held. */
152 static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
153 {
154 u64 now;
155
156 if (!bfqg_stats_empty(stats))
157 return;
158
159 now = ktime_get_ns();
160 if (now > stats->start_empty_time)
161 bfq_stat_add(&stats->empty_time,
162 now - stats->start_empty_time);
163 bfqg_stats_clear_empty(stats);
164 }
165
166 void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
167 {
168 bfq_stat_add(&bfqg->stats.dequeue, 1);
169 }
170
171 void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
172 {
173 struct bfqg_stats *stats = &bfqg->stats;
174
175 if (blkg_rwstat_total(&stats->queued))
176 return;
177
178 /*
179 * The group is already marked empty. This can happen if bfqq got a
180 * new request in its parent group and moved to this group while being
181 * added to the service tree. Just ignore the event and move on.
182 */
183 if (bfqg_stats_empty(stats))
184 return;
185
186 stats->start_empty_time = ktime_get_ns();
187 bfqg_stats_mark_empty(stats);
188 }
189
190 void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
191 {
192 struct bfqg_stats *stats = &bfqg->stats;
193
194 if (bfqg_stats_idling(stats)) {
195 u64 now = ktime_get_ns();
196
197 if (now > stats->start_idle_time)
198 bfq_stat_add(&stats->idle_time,
199 now - stats->start_idle_time);
200 bfqg_stats_clear_idling(stats);
201 }
202 }
203
204 void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
205 {
206 struct bfqg_stats *stats = &bfqg->stats;
207
208 stats->start_idle_time = ktime_get_ns();
209 bfqg_stats_mark_idling(stats);
210 }
211
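/*
 * Take one sample of the number of queued requests for the
 * avg_queue_size debug statistic; the average (sum / samples) is
 * computed when the stat is read. Also closes a pending group-wait
 * period.
 */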
212 void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
213 {
214 struct bfqg_stats *stats = &bfqg->stats;
215
216 bfq_stat_add(&stats->avg_queue_size_sum,
217 blkg_rwstat_total(&stats->queued));
218 bfq_stat_add(&stats->avg_queue_size_samples, 1);
219 bfqg_stats_update_group_wait_time(stats);
220 }
221
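/*
 * Account a request being queued in @bfqg: bump the queued rwstat for
 * the request direction, close any pending "empty" period, and, if
 * @bfqq is not the in-service queue, start the group-wait clock if
 * needed.
 */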
222 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
223 blk_opf_t opf)
224 {
225 blkg_rwstat_add(&bfqg->stats.queued, opf, 1);
226 bfqg_stats_end_empty_time(&bfqg->stats);
227 if (!(bfqq == bfqg->bfqd->in_service_queue))
228 bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
229 }
230
231 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf)
232 {
233 blkg_rwstat_add(&bfqg->stats.queued, opf, -1);
234 }
235
236 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf)
237 {
238 blkg_rwstat_add(&bfqg->stats.merged, opf, 1);
239 }
240
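/*
 * On request completion, account the service time (dispatch to
 * completion) and the wait time (insertion to dispatch) of the request
 * in the per-direction rwstats of @bfqg.
 */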
241 void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
242 u64 io_start_time_ns, blk_opf_t opf)
243 {
244 struct bfqg_stats *stats = &bfqg->stats;
245 u64 now = ktime_get_ns();
246
247 if (now > io_start_time_ns)
248 blkg_rwstat_add(&stats->service_time, opf,
249 now - io_start_time_ns);
250 if (io_start_time_ns > start_time_ns)
251 blkg_rwstat_add(&stats->wait_time, opf,
252 io_start_time_ns - start_time_ns);
253 }
254
255 #else /* CONFIG_BFQ_CGROUP_DEBUG */
256
257 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { }
258 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { }
259 void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
260 u64 io_start_time_ns, blk_opf_t opf) { }
261 void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
262 void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
263
264 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
265
266 #ifdef CONFIG_BFQ_GROUP_IOSCHED
267
268 /*
269 * blk-cgroup policy-related handlers
270 * The following functions help in converting between blk-cgroup
271 * internal structures and BFQ-specific structures.
272 */
273
274 static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
275 {
276 return pd ? container_of(pd, struct bfq_group, pd) : NULL;
277 }
278
279 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
280 {
281 return pd_to_blkg(&bfqg->pd);
282 }
283
284 static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
285 {
286 return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
287 }
288
289 /*
290 * bfq_group handlers
291 * The following functions help in navigating the bfq_group hierarchy
292 * by allowing to find the parent of a bfq_group or the bfq_group
293 * associated to a bfq_queue.
294 */
295
296 static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
297 {
298 struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;
299
300 return pblkg ? blkg_to_bfqg(pblkg) : NULL;
301 }
302
303 struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
304 {
305 struct bfq_entity *group_entity = bfqq->entity.parent;
306
307 return group_entity ? container_of(group_entity, struct bfq_group,
308 entity) :
309 bfqq->bfqd->root_group;
310 }
311
312 /*
313 * The following two functions handle get and put of a bfq_group by
314 * wrapping the related blk-cgroup hooks.
315 */
316
317 static void bfqg_get(struct bfq_group *bfqg)
318 {
319 refcount_inc(&bfqg->ref);
320 }
321
322 static void bfqg_put(struct bfq_group *bfqg)
323 {
324 if (refcount_dec_and_test(&bfqg->ref))
325 kfree(bfqg);
326 }
327
328 static void bfqg_and_blkg_get(struct bfq_group *bfqg)
329 {
330 /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
331 bfqg_get(bfqg);
332
333 blkg_get(bfqg_to_blkg(bfqg));
334 }
335
336 void bfqg_and_blkg_put(struct bfq_group *bfqg)
337 {
338 blkg_put(bfqg_to_blkg(bfqg));
339
340 bfqg_put(bfqg);
341 }
342
343 void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
344 {
345 struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);
346
347 if (!bfqg)
348 return;
349
350 blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
351 blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
352 }
353
354 /* @stats = 0 */
355 static void bfqg_stats_reset(struct bfqg_stats *stats)
356 {
357 #ifdef CONFIG_BFQ_CGROUP_DEBUG
358 /* queued stats shouldn't be cleared */
359 blkg_rwstat_reset(&stats->merged);
360 blkg_rwstat_reset(&stats->service_time);
361 blkg_rwstat_reset(&stats->wait_time);
362 bfq_stat_reset(&stats->time);
363 bfq_stat_reset(&stats->avg_queue_size_sum);
364 bfq_stat_reset(&stats->avg_queue_size_samples);
365 bfq_stat_reset(&stats->dequeue);
366 bfq_stat_reset(&stats->group_wait_time);
367 bfq_stat_reset(&stats->idle_time);
368 bfq_stat_reset(&stats->empty_time);
369 #endif
370 }
371
372 /* @to += @from */
373 static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
374 {
375 if (!to || !from)
376 return;
377
378 #ifdef CONFIG_BFQ_CGROUP_DEBUG
379 /* queued stats are not transferred; they track the current state */
380 blkg_rwstat_add_aux(&to->merged, &from->merged);
381 blkg_rwstat_add_aux(&to->service_time, &from->service_time);
382 blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
383 bfq_stat_add_aux(&to->time, &from->time);
384 bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
385 bfq_stat_add_aux(&to->avg_queue_size_samples,
386 &from->avg_queue_size_samples);
387 bfq_stat_add_aux(&to->dequeue, &from->dequeue);
388 bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
389 bfq_stat_add_aux(&to->idle_time, &from->idle_time);
390 bfq_stat_add_aux(&to->empty_time, &from->empty_time);
391 #endif
392 }
393
394 /*
395 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
396 * recursive stats can still account for the amount used by this bfqg after
397 * it's gone.
398 */
399 static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
400 {
401 struct bfq_group *parent;
402
403 if (!bfqg) /* root_group */
404 return;
405
406 parent = bfqg_parent(bfqg);
407
408 lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
409
410 if (unlikely(!parent))
411 return;
412
413 bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
414 bfqg_stats_reset(&bfqg->stats);
415 }
416
417 void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
418 {
419 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
420
421 entity->weight = entity->new_weight;
422 entity->orig_weight = entity->new_weight;
423 if (bfqq) {
424 bfqq->ioprio = bfqq->new_ioprio;
425 bfqq->ioprio_class = bfqq->new_ioprio_class;
426 /*
427 * Make sure that bfqg and its associated blkg do not
428 * disappear before entity.
429 */
430 bfqg_and_blkg_get(bfqg);
431 }
432 entity->parent = bfqg->my_entity; /* NULL for root group */
433 entity->sched_data = &bfqg->sched_data;
434 }
435
436 static void bfqg_stats_exit(struct bfqg_stats *stats)
437 {
438 blkg_rwstat_exit(&stats->bytes);
439 blkg_rwstat_exit(&stats->ios);
440 #ifdef CONFIG_BFQ_CGROUP_DEBUG
441 blkg_rwstat_exit(&stats->merged);
442 blkg_rwstat_exit(&stats->service_time);
443 blkg_rwstat_exit(&stats->wait_time);
444 blkg_rwstat_exit(&stats->queued);
445 bfq_stat_exit(&stats->time);
446 bfq_stat_exit(&stats->avg_queue_size_sum);
447 bfq_stat_exit(&stats->avg_queue_size_samples);
448 bfq_stat_exit(&stats->dequeue);
449 bfq_stat_exit(&stats->group_wait_time);
450 bfq_stat_exit(&stats->idle_time);
451 bfq_stat_exit(&stats->empty_time);
452 #endif
453 }
454
455 static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
456 {
457 if (blkg_rwstat_init(&stats->bytes, gfp) ||
458 blkg_rwstat_init(&stats->ios, gfp))
459 goto error;
460
461 #ifdef CONFIG_BFQ_CGROUP_DEBUG
462 if (blkg_rwstat_init(&stats->merged, gfp) ||
463 blkg_rwstat_init(&stats->service_time, gfp) ||
464 blkg_rwstat_init(&stats->wait_time, gfp) ||
465 blkg_rwstat_init(&stats->queued, gfp) ||
466 bfq_stat_init(&stats->time, gfp) ||
467 bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
468 bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
469 bfq_stat_init(&stats->dequeue, gfp) ||
470 bfq_stat_init(&stats->group_wait_time, gfp) ||
471 bfq_stat_init(&stats->idle_time, gfp) ||
472 bfq_stat_init(&stats->empty_time, gfp))
473 goto error;
474 #endif
475
476 return 0;
477
478 error:
479 bfqg_stats_exit(stats);
480 return -ENOMEM;
481 }
482
483 static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
484 {
485 return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
486 }
487
488 static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
489 {
490 return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
491 }
492
493 static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
494 {
495 struct bfq_group_data *bgd;
496
497 bgd = kzalloc(sizeof(*bgd), gfp);
498 if (!bgd)
499 return NULL;
500
501 bgd->weight = CGROUP_WEIGHT_DFL;
502 return &bgd->pd;
503 }
504
505 static void bfq_cpd_free(struct blkcg_policy_data *cpd)
506 {
507 kfree(cpd_to_bfqgd(cpd));
508 }
509
510 static struct blkg_policy_data *bfq_pd_alloc(struct gendisk *disk,
511 struct blkcg *blkcg, gfp_t gfp)
512 {
513 struct bfq_group *bfqg;
514
515 bfqg = kzalloc_node(sizeof(*bfqg), gfp, disk->node_id);
516 if (!bfqg)
517 return NULL;
518
519 if (bfqg_stats_init(&bfqg->stats, gfp)) {
520 kfree(bfqg);
521 return NULL;
522 }
523
524 /* see comments in bfq_bic_update_cgroup for why refcounting */
525 refcount_set(&bfqg->ref, 1);
526 return &bfqg->pd;
527 }
528
529 static void bfq_pd_init(struct blkg_policy_data *pd)
530 {
531 struct blkcg_gq *blkg = pd_to_blkg(pd);
532 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
533 struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
534 struct bfq_entity *entity = &bfqg->entity;
535 struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);
536
537 entity->orig_weight = entity->weight = entity->new_weight = d->weight;
538 entity->my_sched_data = &bfqg->sched_data;
539 entity->last_bfqq_created = NULL;
540
541 bfqg->my_entity = entity; /*
542 * the root_group's will be set to NULL
543 * in bfq_init_queue()
544 */
545 bfqg->bfqd = bfqd;
546 bfqg->active_entities = 0;
547 bfqg->num_queues_with_pending_reqs = 0;
548 bfqg->rq_pos_tree = RB_ROOT;
549 }
550
551 static void bfq_pd_free(struct blkg_policy_data *pd)
552 {
553 struct bfq_group *bfqg = pd_to_bfqg(pd);
554
555 bfqg_stats_exit(&bfqg->stats);
556 bfqg_put(bfqg);
557 }
558
559 static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
560 {
561 struct bfq_group *bfqg = pd_to_bfqg(pd);
562
563 bfqg_stats_reset(&bfqg->stats);
564 }
565
566 static void bfq_group_set_parent(struct bfq_group *bfqg,
567 struct bfq_group *parent)
568 {
569 struct bfq_entity *entity;
570
571 entity = &bfqg->entity;
572 entity->parent = parent->my_entity;
573 entity->sched_data = &parent->sched_data;
574 }
575
576 static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
577 {
578 struct bfq_group *parent;
579 struct bfq_entity *entity;
580
581 /*
582 * Update chain of bfq_groups as we might be handling a leaf group
583 * which, along with some of its relatives, has not been hooked yet
584 * to the private hierarchy of BFQ.
585 */
586 entity = &bfqg->entity;
587 for_each_entity(entity) {
588 struct bfq_group *curr_bfqg = container_of(entity,
589 struct bfq_group, entity);
590 if (curr_bfqg != bfqd->root_group) {
591 parent = bfqg_parent(curr_bfqg);
592 if (!parent)
593 parent = bfqd->root_group;
594 bfq_group_set_parent(curr_bfqg, parent);
595 }
596 }
597 }
598
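/*
 * Return the bfq_group to attach @bio to: walk up from bio->bi_blkg
 * until a blkg whose bfq policy data is online is found, re-associate
 * the bio with that blkcg and return its bfqg; fall back to the root
 * group if no online ancestor exists.
 */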
599 struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
600 {
601 struct blkcg_gq *blkg = bio->bi_blkg;
602 struct bfq_group *bfqg;
603
604 while (blkg) {
605 if (!blkg->online) {
606 blkg = blkg->parent;
607 continue;
608 }
609 bfqg = blkg_to_bfqg(blkg);
610 if (bfqg->pd.online) {
611 bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
612 return bfqg;
613 }
614 blkg = blkg->parent;
615 }
616 bio_associate_blkg_from_css(bio,
617 &bfqg_to_blkg(bfqd->root_group)->blkcg->css);
618 return bfqd->root_group;
619 }
620
621 /**
622 * bfq_bfqq_move - migrate @bfqq to @bfqg.
623 * @bfqd: queue descriptor.
624 * @bfqq: the queue to move.
625 * @bfqg: the group to move to.
626 *
627 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
628 * it on the new one. Avoid putting the entity on the old group idle tree.
629 *
630 * Must be called under the scheduler lock, to make sure that the blkg
631 * owning @bfqg does not disappear (see comments in
632 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
633 * objects).
634 */
635 void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
636 struct bfq_group *bfqg)
637 {
638 struct bfq_entity *entity = &bfqq->entity;
639 struct bfq_group *old_parent = bfqq_group(bfqq);
640 bool has_pending_reqs = false;
641
642 /*
643 * There is no point in moving bfqq to the same group, which can happen
644 * when the root group is offlined
645 */
646 if (old_parent == bfqg)
647 return;
648
649 /*
650 * oom_bfqq is not allowed to move: it holds a reference to root_group
651 * until the elevator exits.
652 */
653 if (bfqq == &bfqd->oom_bfqq)
654 return;
655 /*
656 * Get extra reference to prevent bfqq from being freed in
657 * next possible expire or deactivate.
658 */
659 bfqq->ref++;
660
661 if (entity->in_groups_with_pending_reqs) {
662 has_pending_reqs = true;
663 bfq_del_bfqq_in_groups_with_pending_reqs(bfqq);
664 }
665
666 /* If bfqq is empty, then bfq_bfqq_expire also invokes
667 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
668 * from data structures related to current group. Otherwise we
669 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
670 * we do below.
671 */
672 if (bfqq == bfqd->in_service_queue)
673 bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
674 false, BFQQE_PREEMPTED);
675
676 if (bfq_bfqq_busy(bfqq))
677 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
678 else if (entity->on_st_or_in_serv)
679 bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
680 bfqg_and_blkg_put(old_parent);
681
682 if (entity->parent &&
683 entity->parent->last_bfqq_created == bfqq)
684 entity->parent->last_bfqq_created = NULL;
685 else if (bfqd->last_bfqq_created == bfqq)
686 bfqd->last_bfqq_created = NULL;
687
688 entity->parent = bfqg->my_entity;
689 entity->sched_data = &bfqg->sched_data;
690 /* pin down bfqg and its associated blkg */
691 bfqg_and_blkg_get(bfqg);
692
693 if (has_pending_reqs)
694 bfq_add_bfqq_in_groups_with_pending_reqs(bfqq);
695
696 if (bfq_bfqq_busy(bfqq)) {
697 if (unlikely(!bfqd->nonrot_with_queueing))
698 bfq_pos_tree_add_move(bfqd, bfqq);
699 bfq_activate_bfqq(bfqd, bfqq);
700 }
701
702 if (!bfqd->in_service_queue && !bfqd->tot_rq_in_driver)
703 bfq_schedule_dispatch(bfqd);
704 /* release extra ref taken above, bfqq may happen to be freed now */
705 bfq_put_queue(bfqq);
706 }
707
708 static void bfq_sync_bfqq_move(struct bfq_data *bfqd,
709 struct bfq_queue *sync_bfqq,
710 struct bfq_io_cq *bic,
711 struct bfq_group *bfqg,
712 unsigned int act_idx)
713 {
714 struct bfq_queue *bfqq;
715
716 if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
717 /* We are the only user of this bfqq, just move it */
718 if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
719 bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
720 return;
721 }
722
723 /*
724 * The queue was merged to a different queue. Check
725 * that the merge chain still belongs to the same
726 * cgroup.
727 */
728 for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq)
729 if (bfqq->entity.sched_data != &bfqg->sched_data)
730 break;
731 if (bfqq) {
732 /*
733 * Some queue changed cgroup so the merge is not valid
734 * anymore. We cannot easily just cancel the merge (by
735 * clearing new_bfqq) as there may be other processes
736 * using this queue and holding refs to all queues
737 * below sync_bfqq->new_bfqq. Similarly if the merge
738 * already happened, we need to detach from bfqq now
739 * so that we cannot merge bio to a request from the
740 * old cgroup.
741 */
742 bfq_put_cooperator(sync_bfqq);
743 bic_set_bfqq(bic, NULL, true, act_idx);
744 bfq_release_process_ref(bfqd, sync_bfqq);
745 }
746 }
747
748 /**
749 * __bfq_bic_change_cgroup - move @bic to @bfqg.
750 * @bfqd: the queue descriptor.
751 * @bic: the bic to move.
752 * @bfqg: the group to move to.
753 *
754 * Move bic to bfqg, assuming that bfqd->lock is held; this makes
755 * sure that the reference to the cgroup stays valid across the call
756 * (see comments in bfq_bic_update_cgroup on this issue).
757 */
758 static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
759 struct bfq_io_cq *bic,
760 struct bfq_group *bfqg)
761 {
762 unsigned int act_idx;
763
764 for (act_idx = 0; act_idx < bfqd->num_actuators; act_idx++) {
765 struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false, act_idx);
766 struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true, act_idx);
767
768 if (async_bfqq &&
769 async_bfqq->entity.sched_data != &bfqg->sched_data) {
770 bic_set_bfqq(bic, NULL, false, act_idx);
771 bfq_release_process_ref(bfqd, async_bfqq);
772 }
773
774 if (sync_bfqq)
775 bfq_sync_bfqq_move(bfqd, sync_bfqq, bic, bfqg, act_idx);
776 }
777 }
778
779 void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
780 {
781 struct bfq_data *bfqd = bic_to_bfqd(bic);
782 struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
783 uint64_t serial_nr;
784
785 serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;
786
787 /*
788 * Check whether blkcg has changed. The condition may trigger
789 * spuriously on a newly created bic but there's no harm.
790 */
791 if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
792 return;
793
794 /*
795 * New cgroup for this process. Make sure it is linked to bfq internal
796 * cgroup hierarchy.
797 */
798 bfq_link_bfqg(bfqd, bfqg);
799 __bfq_bic_change_cgroup(bfqd, bic, bfqg);
800 /*
801 * Update blkg_path for bfq_log_* functions. We cache this
802 * path, and update it here, for the following
803 * reasons. Operations on blkg objects in blk-cgroup are
804 * protected with the request_queue lock, and not with the
805 * lock that protects the instances of this scheduler
806 * (bfqd->lock). This exposes BFQ to the following sort of
807 * race.
808 *
809 * The blkg_lookup performed in bfq_get_queue, protected
810 * through rcu, may happen to return the address of a copy of
811 * the original blkg. If this is the case, then the
812 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
813 * the blkg, is useless: it does not prevent blk-cgroup code
814 * from destroying both the original blkg and all objects
815 * directly or indirectly referred by the copy of the
816 * blkg.
817 *
818 * On the bright side, destroy operations on a blkg invoke, as
819 * a first step, hooks of the scheduler associated with the
820 * blkg. And these hooks are executed with bfqd->lock held for
821 * BFQ. As a consequence, for any blkg associated with the
822 * request queue this instance of the scheduler is attached
823 * to, we are guaranteed that such a blkg is not destroyed, and
824 * that all the pointers it contains are consistent, while we
825 * are holding bfqd->lock. A blkg_lookup performed with
826 * bfqd->lock held then returns a fully consistent blkg, which
827 * remains consistent as long as this lock is held.
828 *
829 * Thanks to the last fact, and to the fact that: (1) bfqg has
830 * been obtained through a blkg_lookup in the above
831 * assignment, and (2) bfqd->lock is being held, here we can
832 * safely use the policy data for the involved blkg (i.e., the
833 * field bfqg->pd) to get to the blkg associated with bfqg,
834 * and then we can safely use any field of blkg. After we
835 * release bfqd->lock, even just getting blkg through this
836 * bfqg may cause dangling references to be traversed, as
837 * bfqg->pd may not exist any more.
838 *
839 * In view of the above facts, here we cache, in the bfqg, any
840 * blkg data we may need for this bic, and for its associated
841 * bfq_queue. As of now, we need to cache only the path of the
842 * blkg, which is used in the bfq_log_* functions.
843 *
844 * Finally, note that bfqg itself needs to be protected from
845 * destruction on the blkg_free of the original blkg (which
846 * invokes bfq_pd_free). We use an additional private
847 * refcounter for bfqg, to let it disappear only after no
848 * bfq_queue refers to it any longer.
849 */
850 blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
851 bic->blkcg_serial_nr = serial_nr;
852 }
853
854 /**
855 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
856 * @st: the service tree being flushed.
857 */
858 static void bfq_flush_idle_tree(struct bfq_service_tree *st)
859 {
860 struct bfq_entity *entity = st->first_idle;
861
862 for (; entity ; entity = st->first_idle)
863 __bfq_deactivate_entity(entity, false);
864 }
865
866 /**
867 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
868 * @bfqd: the device data structure with the root group.
869 * @entity: the entity to move, if entity is a leaf; or the parent entity
870 * of an active leaf entity to move, if entity is not a leaf.
871 * @ioprio_class: I/O priority class to reparent.
872 */
873 static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
874 struct bfq_entity *entity,
875 int ioprio_class)
876 {
877 struct bfq_queue *bfqq;
878 struct bfq_entity *child_entity = entity;
879
880 while (child_entity->my_sched_data) { /* leaf not reached yet */
881 struct bfq_sched_data *child_sd = child_entity->my_sched_data;
882 struct bfq_service_tree *child_st = child_sd->service_tree +
883 ioprio_class;
884 struct rb_root *child_active = &child_st->active;
885
886 child_entity = bfq_entity_of(rb_first(child_active));
887
888 if (!child_entity)
889 child_entity = child_sd->in_service_entity;
890 }
891
892 bfqq = bfq_entity_to_bfqq(child_entity);
893 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
894 }
895
896 /**
897 * bfq_reparent_active_queues - move to the root group all active queues.
898 * @bfqd: the device data structure with the root group.
899 * @bfqg: the group to move from.
900 * @st: the service tree to start the search from.
901 * @ioprio_class: I/O priority class to reparent.
902 */
903 static void bfq_reparent_active_queues(struct bfq_data *bfqd,
904 struct bfq_group *bfqg,
905 struct bfq_service_tree *st,
906 int ioprio_class)
907 {
908 struct rb_root *active = &st->active;
909 struct bfq_entity *entity;
910
911 while ((entity = bfq_entity_of(rb_first(active))))
912 bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);
913
914 if (bfqg->sched_data.in_service_entity)
915 bfq_reparent_leaf_entity(bfqd,
916 bfqg->sched_data.in_service_entity,
917 ioprio_class);
918 }
919
920 /**
921 * bfq_pd_offline - deactivate the entity associated with @pd,
922 * and reparent its child entities.
923 * @pd: descriptor of the policy going offline.
924 *
925 * blkio already grabs the queue_lock for us, so no need to use
926 * RCU-based magic
927 */
928 static void bfq_pd_offline(struct blkg_policy_data *pd)
929 {
930 struct bfq_service_tree *st;
931 struct bfq_group *bfqg = pd_to_bfqg(pd);
932 struct bfq_data *bfqd = bfqg->bfqd;
933 struct bfq_entity *entity = bfqg->my_entity;
934 unsigned long flags;
935 int i;
936
937 spin_lock_irqsave(&bfqd->lock, flags);
938
939 if (!entity) /* root group */
940 goto put_async_queues;
941
942 /*
943 * Empty all service_trees belonging to this group before
944 * deactivating the group itself.
945 */
946 for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
947 st = bfqg->sched_data.service_tree + i;
948
949 /*
950 * It may happen that some queues are still active
951 * (busy) upon group destruction (if the corresponding
952 * processes have been forced to terminate). We move
953 * all the leaf entities corresponding to these queues
954 * to the root_group.
955 * Also, it may happen that the group has an entity
956 * in service, which is disconnected from the active
957 * tree: it must be moved, too.
958 * There is no need to put the sync queues, as the
959 * scheduler has taken no reference.
960 */
961 bfq_reparent_active_queues(bfqd, bfqg, st, i);
962
963 /*
964 * The idle tree may still contain bfq_queues
965 * belonging to exited tasks because they never
966 * migrated to a different cgroup from the one being
967 * destroyed now. In addition, even
968 * bfq_reparent_active_queues() may happen to add some
969 * entities to the idle tree. It happens if, in some
970 * of the calls to bfq_bfqq_move() performed by
971 * bfq_reparent_active_queues(), the queue to move is
972 * empty and gets expired.
973 */
974 bfq_flush_idle_tree(st);
975 }
976
977 __bfq_deactivate_entity(entity, false);
978
979 put_async_queues:
980 bfq_put_async_queues(bfqd, bfqg);
981
982 spin_unlock_irqrestore(&bfqd->lock, flags);
983 /*
984 * @blkg is going offline and will be ignored by
985 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
986 * that they don't get lost. If IOs complete after this point, the
987 * stats for them will be lost. Oh well...
988 */
989 bfqg_stats_xfer_dead(bfqg);
990 }
991
992 void bfq_end_wr_async(struct bfq_data *bfqd)
993 {
994 struct blkcg_gq *blkg;
995
996 list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
997 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
998
999 bfq_end_wr_async_queues(bfqd, bfqg);
1000 }
1001 bfq_end_wr_async_queues(bfqd, bfqd->root_group);
1002 }
1003
1004 static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
1005 {
1006 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1007 struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
1008 unsigned int val = 0;
1009
1010 if (bfqgd)
1011 val = bfqgd->weight;
1012
1013 seq_printf(sf, "%u\n", val);
1014
1015 return 0;
1016 }
1017
1018 static u64 bfqg_prfill_weight_device(struct seq_file *sf,
1019 struct blkg_policy_data *pd, int off)
1020 {
1021 struct bfq_group *bfqg = pd_to_bfqg(pd);
1022
1023 if (!bfqg->entity.dev_weight)
1024 return 0;
1025 return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
1026 }
1027
1028 static int bfq_io_show_weight(struct seq_file *sf, void *v)
1029 {
1030 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1031 struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
1032
1033 seq_printf(sf, "default %u\n", bfqgd->weight);
1034 blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
1035 &blkcg_policy_bfq, 0, false);
1036 return 0;
1037 }
1038
1039 static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
1040 {
1041 weight = dev_weight ?: weight;
1042
1043 bfqg->entity.dev_weight = dev_weight;
1044 /*
1045 * Setting the prio_changed flag of the entity
1046 * to 1 with new_weight == weight would re-set
1047 * the value of the weight to its ioprio mapping.
1048 * Set the flag only if necessary.
1049 */
1050 if ((unsigned short)weight != bfqg->entity.new_weight) {
1051 bfqg->entity.new_weight = (unsigned short)weight;
1052 /*
1053 * Make sure that the above new value has been
1054 * stored in bfqg->entity.new_weight before
1055 * setting the prio_changed flag. In fact,
1056 * this flag may be read asynchronously (in
1057 * critical sections protected by a different
1058 * lock than that held here), and finding this
1059 * flag set may cause the execution of the code
1060 * for updating parameters whose value may
1061 * depend also on bfqg->entity.new_weight (in
1062 * __bfq_entity_update_weight_prio).
1063 * This barrier makes sure that the new value
1064 * of bfqg->entity.new_weight is correctly
1065 * seen in that code.
1066 */
1067 smp_wmb();
1068 bfqg->entity.prio_changed = 1;
1069 }
1070 }
1071
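/*
 * Set the blkcg-wide default weight: validate the range, store the new
 * value in the bfq_group_data and propagate it to every bfq_group of
 * the blkcg, under blkcg->lock.
 */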
1072 static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
1073 struct cftype *cftype,
1074 u64 val)
1075 {
1076 struct blkcg *blkcg = css_to_blkcg(css);
1077 struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
1078 struct blkcg_gq *blkg;
1079 int ret = -ERANGE;
1080
1081 if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
1082 return ret;
1083
1084 ret = 0;
1085 spin_lock_irq(&blkcg->lock);
1086 bfqgd->weight = (unsigned short)val;
1087 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1088 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
1089
1090 if (bfqg)
1091 bfq_group_set_weight(bfqg, val, 0);
1092 }
1093 spin_unlock_irq(&blkcg->lock);
1094
1095 return ret;
1096 }
1097
1098 static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
1099 char *buf, size_t nbytes,
1100 loff_t off)
1101 {
1102 int ret;
1103 struct blkg_conf_ctx ctx;
1104 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1105 struct bfq_group *bfqg;
1106 u64 v;
1107
1108 blkg_conf_init(&ctx, buf);
1109
1110 ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, &ctx);
1111 if (ret)
1112 goto out;
1113
1114 if (sscanf(ctx.body, "%llu", &v) == 1) {
1115 /* require "default" on dfl */
1116 ret = -ERANGE;
1117 if (!v)
1118 goto out;
1119 } else if (!strcmp(strim(ctx.body), "default")) {
1120 v = 0;
1121 } else {
1122 ret = -EINVAL;
1123 goto out;
1124 }
1125
1126 bfqg = blkg_to_bfqg(ctx.blkg);
1127
1128 ret = -ERANGE;
1129 if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
1130 bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
1131 ret = 0;
1132 }
1133 out:
1134 blkg_conf_exit(&ctx);
1135 return ret ?: nbytes;
1136 }
1137
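/*
 * Write handler shared by bfq.weight (cgroup v2) and bfq.weight_device
 * (legacy): a bare number or "default <weight>" updates the blkcg-wide
 * default via bfq_io_set_weight_legacy(); anything else is treated as a
 * per-device setting and handed to bfq_io_set_device_weight().
 */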
1138 static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
1139 char *buf, size_t nbytes,
1140 loff_t off)
1141 {
1142 char *endp;
1143 int ret;
1144 u64 v;
1145
1146 buf = strim(buf);
1147
1148 /* "WEIGHT" or "default WEIGHT" sets the default weight */
1149 v = simple_strtoull(buf, &endp, 0);
1150 if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
1151 ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
1152 return ret ?: nbytes;
1153 }
1154
1155 return bfq_io_set_device_weight(of, buf, nbytes, off);
1156 }
1157
1158 static int bfqg_print_rwstat(struct seq_file *sf, void *v)
1159 {
1160 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1161 &blkcg_policy_bfq, seq_cft(sf)->private, true);
1162 return 0;
1163 }
1164
1165 static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
1166 struct blkg_policy_data *pd, int off)
1167 {
1168 struct blkg_rwstat_sample sum;
1169
1170 blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
1171 return __blkg_prfill_rwstat(sf, pd, &sum);
1172 }
1173
1174 static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
1175 {
1176 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1177 bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
1178 seq_cft(sf)->private, true);
1179 return 0;
1180 }
1181
1182 #ifdef CONFIG_BFQ_CGROUP_DEBUG
1183 static int bfqg_print_stat(struct seq_file *sf, void *v)
1184 {
1185 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1186 &blkcg_policy_bfq, seq_cft(sf)->private, false);
1187 return 0;
1188 }
1189
1190 static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
1191 struct blkg_policy_data *pd, int off)
1192 {
1193 struct blkcg_gq *blkg = pd_to_blkg(pd);
1194 struct blkcg_gq *pos_blkg;
1195 struct cgroup_subsys_state *pos_css;
1196 u64 sum = 0;
1197
1198 lockdep_assert_held(&blkg->q->queue_lock);
1199
1200 rcu_read_lock();
1201 blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
1202 struct bfq_stat *stat;
1203
1204 if (!pos_blkg->online)
1205 continue;
1206
1207 stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
1208 sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
1209 }
1210 rcu_read_unlock();
1211
1212 return __blkg_prfill_u64(sf, pd, sum);
1213 }
1214
1215 static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
1216 {
1217 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1218 bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
1219 seq_cft(sf)->private, false);
1220 return 0;
1221 }
1222
1223 static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
1224 int off)
1225 {
1226 struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
1227 u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);
1228
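/* stats.bytes counts bytes; shift by 9 to report 512-byte sectors */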
1229 return __blkg_prfill_u64(sf, pd, sum >> 9);
1230 }
1231
1232 static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
1233 {
1234 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1235 bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
1236 return 0;
1237 }
1238
1239 static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
1240 struct blkg_policy_data *pd, int off)
1241 {
1242 struct blkg_rwstat_sample tmp;
1243
1244 blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
1245 offsetof(struct bfq_group, stats.bytes), &tmp);
1246
1247 return __blkg_prfill_u64(sf, pd,
1248 (tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
1249 }
1250
1251 static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
1252 {
1253 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1254 bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
1255 false);
1256 return 0;
1257 }
1258
1259 static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
1260 struct blkg_policy_data *pd, int off)
1261 {
1262 struct bfq_group *bfqg = pd_to_bfqg(pd);
1263 u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
1264 u64 v = 0;
1265
1266 if (samples) {
1267 v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
1268 v = div64_u64(v, samples);
1269 }
1270 __blkg_prfill_u64(sf, pd, v);
1271 return 0;
1272 }
1273
1274 /* print avg_queue_size */
1275 static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
1276 {
1277 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1278 bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
1279 0, false);
1280 return 0;
1281 }
1282 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
1283
1284 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
1285 {
1286 int ret;
1287
1288 ret = blkcg_activate_policy(bfqd->queue->disk, &blkcg_policy_bfq);
1289 if (ret)
1290 return NULL;
1291
1292 return blkg_to_bfqg(bfqd->queue->root_blkg);
1293 }
1294
1295 struct blkcg_policy blkcg_policy_bfq = {
1296 .dfl_cftypes = bfq_blkg_files,
1297 .legacy_cftypes = bfq_blkcg_legacy_files,
1298
1299 .cpd_alloc_fn = bfq_cpd_alloc,
1300 .cpd_free_fn = bfq_cpd_free,
1301
1302 .pd_alloc_fn = bfq_pd_alloc,
1303 .pd_init_fn = bfq_pd_init,
1304 .pd_offline_fn = bfq_pd_offline,
1305 .pd_free_fn = bfq_pd_free,
1306 .pd_reset_stats_fn = bfq_pd_reset_stats,
1307 };
1308
1309 struct cftype bfq_blkcg_legacy_files[] = {
1310 {
1311 .name = "bfq.weight",
1312 .flags = CFTYPE_NOT_ON_ROOT,
1313 .seq_show = bfq_io_show_weight_legacy,
1314 .write_u64 = bfq_io_set_weight_legacy,
1315 },
1316 {
1317 .name = "bfq.weight_device",
1318 .flags = CFTYPE_NOT_ON_ROOT,
1319 .seq_show = bfq_io_show_weight,
1320 .write = bfq_io_set_weight,
1321 },
1322
1323 /* statistics, covers only the tasks in the bfqg */
1324 {
1325 .name = "bfq.io_service_bytes",
1326 .private = offsetof(struct bfq_group, stats.bytes),
1327 .seq_show = bfqg_print_rwstat,
1328 },
1329 {
1330 .name = "bfq.io_serviced",
1331 .private = offsetof(struct bfq_group, stats.ios),
1332 .seq_show = bfqg_print_rwstat,
1333 },
1334 #ifdef CONFIG_BFQ_CGROUP_DEBUG
1335 {
1336 .name = "bfq.time",
1337 .private = offsetof(struct bfq_group, stats.time),
1338 .seq_show = bfqg_print_stat,
1339 },
1340 {
1341 .name = "bfq.sectors",
1342 .seq_show = bfqg_print_stat_sectors,
1343 },
1344 {
1345 .name = "bfq.io_service_time",
1346 .private = offsetof(struct bfq_group, stats.service_time),
1347 .seq_show = bfqg_print_rwstat,
1348 },
1349 {
1350 .name = "bfq.io_wait_time",
1351 .private = offsetof(struct bfq_group, stats.wait_time),
1352 .seq_show = bfqg_print_rwstat,
1353 },
1354 {
1355 .name = "bfq.io_merged",
1356 .private = offsetof(struct bfq_group, stats.merged),
1357 .seq_show = bfqg_print_rwstat,
1358 },
1359 {
1360 .name = "bfq.io_queued",
1361 .private = offsetof(struct bfq_group, stats.queued),
1362 .seq_show = bfqg_print_rwstat,
1363 },
1364 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
1365
1366 /* the same statistics which cover the bfqg and its descendants */
1367 {
1368 .name = "bfq.io_service_bytes_recursive",
1369 .private = offsetof(struct bfq_group, stats.bytes),
1370 .seq_show = bfqg_print_rwstat_recursive,
1371 },
1372 {
1373 .name = "bfq.io_serviced_recursive",
1374 .private = offsetof(struct bfq_group, stats.ios),
1375 .seq_show = bfqg_print_rwstat_recursive,
1376 },
1377 #ifdef CONFIG_BFQ_CGROUP_DEBUG
1378 {
1379 .name = "bfq.time_recursive",
1380 .private = offsetof(struct bfq_group, stats.time),
1381 .seq_show = bfqg_print_stat_recursive,
1382 },
1383 {
1384 .name = "bfq.sectors_recursive",
1385 .seq_show = bfqg_print_stat_sectors_recursive,
1386 },
1387 {
1388 .name = "bfq.io_service_time_recursive",
1389 .private = offsetof(struct bfq_group, stats.service_time),
1390 .seq_show = bfqg_print_rwstat_recursive,
1391 },
1392 {
1393 .name = "bfq.io_wait_time_recursive",
1394 .private = offsetof(struct bfq_group, stats.wait_time),
1395 .seq_show = bfqg_print_rwstat_recursive,
1396 },
1397 {
1398 .name = "bfq.io_merged_recursive",
1399 .private = offsetof(struct bfq_group, stats.merged),
1400 .seq_show = bfqg_print_rwstat_recursive,
1401 },
1402 {
1403 .name = "bfq.io_queued_recursive",
1404 .private = offsetof(struct bfq_group, stats.queued),
1405 .seq_show = bfqg_print_rwstat_recursive,
1406 },
1407 {
1408 .name = "bfq.avg_queue_size",
1409 .seq_show = bfqg_print_avg_queue_size,
1410 },
1411 {
1412 .name = "bfq.group_wait_time",
1413 .private = offsetof(struct bfq_group, stats.group_wait_time),
1414 .seq_show = bfqg_print_stat,
1415 },
1416 {
1417 .name = "bfq.idle_time",
1418 .private = offsetof(struct bfq_group, stats.idle_time),
1419 .seq_show = bfqg_print_stat,
1420 },
1421 {
1422 .name = "bfq.empty_time",
1423 .private = offsetof(struct bfq_group, stats.empty_time),
1424 .seq_show = bfqg_print_stat,
1425 },
1426 {
1427 .name = "bfq.dequeue",
1428 .private = offsetof(struct bfq_group, stats.dequeue),
1429 .seq_show = bfqg_print_stat,
1430 },
1431 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
1432 { } /* terminate */
1433 };
1434
1435 struct cftype bfq_blkg_files[] = {
1436 {
1437 .name = "bfq.weight",
1438 .flags = CFTYPE_NOT_ON_ROOT,
1439 .seq_show = bfq_io_show_weight,
1440 .write = bfq_io_set_weight,
1441 },
1442 {} /* terminate */
1443 };
1444
1445 #else /* CONFIG_BFQ_GROUP_IOSCHED */
1446
1447 void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1448 struct bfq_group *bfqg) {}
1449
1450 void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
1451 {
1452 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
1453
1454 entity->weight = entity->new_weight;
1455 entity->orig_weight = entity->new_weight;
1456 if (bfqq) {
1457 bfqq->ioprio = bfqq->new_ioprio;
1458 bfqq->ioprio_class = bfqq->new_ioprio_class;
1459 }
1460 entity->sched_data = &bfqg->sched_data;
1461 }
1462
1463 void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}
1464
1465 void bfq_end_wr_async(struct bfq_data *bfqd)
1466 {
1467 bfq_end_wr_async_queues(bfqd, bfqd->root_group);
1468 }
1469
1470 struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
1471 {
1472 return bfqd->root_group;
1473 }
1474
1475 struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
1476 {
1477 return bfqq->bfqd->root_group;
1478 }
1479
1480 void bfqg_and_blkg_put(struct bfq_group *bfqg) {}
1481
1482 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
1483 {
1484 struct bfq_group *bfqg;
1485 int i;
1486
1487 bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
1488 if (!bfqg)
1489 return NULL;
1490
1491 for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
1492 bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
1493
1494 return bfqg;
1495 }
1496 #endif /* CONFIG_BFQ_GROUP_IOSCHED */
1497