xref: /openbmc/linux/block/bfq-cgroup.c (revision 323dd2c3)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cgroups support for the BFQ I/O scheduler.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static void bfq_stat_exit(struct bfq_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * bfq_stat_add - add a value to a bfq_stat
 * @stat: target bfq_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * do not re-enter this function for the same counter.
 */
static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}
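
/*
 * A note on the update path: percpu_counter_add_batch() accumulates deltas
 * in a per-CPU counter and folds them into the shared count only once a
 * CPU's local delta reaches BLKG_STAT_CPU_BATCH, so writers stay cheap on
 * hot paths. The reader below pays for accuracy instead, by summing all
 * per-CPU deltas with percpu_counter_sum_positive().
 */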

/**
 * bfq_stat_read - read the current value of a bfq_stat
 * @stat: bfq_stat to read
 */
static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * bfq_stat_reset - reset a bfq_stat
 * @stat: bfq_stat to reset
 */
static inline void bfq_stat_reset(struct bfq_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * bfq_stat_add_aux - add a bfq_stat into another's aux count
 * @to: the destination bfq_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void bfq_stat_add_aux(struct bfq_stat *to,
				    struct bfq_stat *from)
{
	atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

/**
 * blkg_prfill_stat - prfill callback for bfq_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the bfq_stat in @pd
 *
 * prfill callback for printing a bfq_stat.
 */
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
		int off)
{
	return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
}

/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)			\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}									\

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS
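
/*
 * For reference, BFQG_FLAG_FNS(waiting) expands to three trivial helpers
 * operating on the BFQG_stats_waiting bit of stats->flags:
 *
 *	static void bfqg_stats_mark_waiting(struct bfqg_stats *stats)
 *	{
 *		stats->flags |= (1 << BFQG_stats_waiting);
 *	}
 *	static void bfqg_stats_clear_waiting(struct bfqg_stats *stats)
 *	{
 *		stats->flags &= ~(1 << BFQG_stats_waiting);
 *	}
 *	static int bfqg_stats_waiting(struct bfqg_stats *stats)
 *	{
 *		return (stats->flags & (1 << BFQG_stats_waiting)) != 0;
 *	}
 *
 * and analogously for "idling" and "empty".
 */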

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_group_wait_time)
		bfq_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = ktime_get_ns();
	bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_empty(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_empty_time)
		bfq_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	bfq_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * The group is already marked empty. This can happen if bfqq got a
	 * new request in the parent group and was moved to this group while
	 * being added to the service tree. Just ignore the event and move on.
	 */
	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = ktime_get_ns();
	bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		u64 now = ktime_get_ns();

		if (now > stats->start_idle_time)
			bfq_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = ktime_get_ns();
	bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	bfq_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	bfq_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}
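
/*
 * The average itself is never stored: each call above adds one sample (the
 * current number of queued requests) to avg_queue_size_sum and bumps
 * avg_queue_size_samples, and bfqg_prfill_avg_queue_size() later reports
 * sum / samples when bfq.avg_queue_size is read.
 */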

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (bfqq != ((struct bfq_data *)bfqg->bfqd)->in_service_queue)
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op)
{
	struct bfqg_stats *stats = &bfqg->stats;
	u64 now = ktime_get_ns();

	if (now > io_start_time_ns)
		blkg_rwstat_add(&stats->service_time, op,
				now - io_start_time_ns);
	if (io_start_time_ns > start_time_ns)
		blkg_rwstat_add(&stats->wait_time, op,
				io_start_time_ns - start_time_ns);
}

#else /* CONFIG_BFQ_CGROUP_DEBUG */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_CGROUP_DEBUG */
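
/*
 * With CONFIG_BFQ_CGROUP_DEBUG disabled, the hooks above become empty
 * functions, so the call sites in the rest of BFQ need no #ifdefs and the
 * debug-stat bookkeeping reduces to cheap no-op calls.
 */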

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by making it possible to find the parent of a bfq_group or the
 * bfq_group associated with a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{
	bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
	bfqg->ref--;

	if (bfqg->ref == 0)
		kfree(bfqg);
}
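
/*
 * bfqg->ref is a plain integer rather than a refcount_t: all get/put
 * operations are assumed to be serialized by the caller (in BFQ,
 * presumably via the per-device scheduler lock, bfqd->lock), so no
 * atomicity is needed. The last put frees the bfq_group itself; the blkg
 * it belongs to has its own, separate reference count, handled by the
 * bfqg_and_blkg_get/put pair below.
 */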

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);

	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	blkg_put(bfqg_to_blkg(bfqg));

	bfqg_put(bfqg);
}

void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
{
	struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);

	blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
	blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	bfq_stat_reset(&stats->time);
	bfq_stat_reset(&stats->avg_queue_size_sum);
	bfq_stat_reset(&stats->avg_queue_size_samples);
	bfq_stat_reset(&stats->dequeue);
	bfq_stat_reset(&stats->group_wait_time);
	bfq_stat_reset(&stats->idle_time);
	bfq_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be transferred */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	bfq_stat_add_aux(&to->time, &from->time);
	bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	bfq_stat_add_aux(&to->avg_queue_size_samples,
			  &from->avg_queue_size_samples);
	bfq_stat_add_aux(&to->dequeue, &from->dequeue);
	bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	bfq_stat_add_aux(&to->idle_time, &from->idle_time);
	bfq_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg) /* root_group */
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
		/*
		 * Make sure that bfqg and its associated blkg do not
		 * disappear before entity.
		 */
		bfqg_and_blkg_get(bfqg);
	}
	entity->parent = bfqg->my_entity; /* NULL for root group */
	entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
	blkg_rwstat_exit(&stats->bytes);
	blkg_rwstat_exit(&stats->ios);
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	bfq_stat_exit(&stats->time);
	bfq_stat_exit(&stats->avg_queue_size_sum);
	bfq_stat_exit(&stats->avg_queue_size_samples);
	bfq_stat_exit(&stats->dequeue);
	bfq_stat_exit(&stats->group_wait_time);
	bfq_stat_exit(&stats->idle_time);
	bfq_stat_exit(&stats->empty_time);
#endif
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
	if (blkg_rwstat_init(&stats->bytes, gfp) ||
	    blkg_rwstat_init(&stats->ios, gfp)) {
		/*
		 * bfqg_stats_exit() is safe on the zeroed,
		 * partially-initialized stats: don't leak whichever of the
		 * two rwstats did get initialized.
		 */
		bfqg_stats_exit(stats);
		return -ENOMEM;
	}

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    bfq_stat_init(&stats->time, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    bfq_stat_init(&stats->dequeue, gfp) ||
	    bfq_stat_init(&stats->group_wait_time, gfp) ||
	    bfq_stat_init(&stats->idle_time, gfp) ||
	    bfq_stat_init(&stats->empty_time, gfp)) {
		bfqg_stats_exit(stats);
		return -ENOMEM;
	}
#endif

	return 0;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;
	return &bgd->pd;
}

static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
	struct bfq_group_data *d = cpd_to_bfqgd(cpd);

	d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
		CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
					     struct blkcg *blkcg)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	/* see comments in bfq_bic_update_cgroup for why refcounting */
	bfqg_get(bfqg);
	return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	/* The root_group's my_entity is instead set to NULL, in bfq_init_queue(). */
	bfqg->my_entity = entity;
	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
					struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
					 struct blkcg *blkcg)
{
	struct blkcg_gq *blkg;

	blkg = blkg_lookup(blkcg, bfqd->queue);
	if (likely(blkg))
		return blkg_to_bfqg(blkg);
	return NULL;
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
				     struct blkcg *blkcg)
{
	struct bfq_group *bfqg, *parent;
	struct bfq_entity *entity;

	bfqg = bfq_lookup_bfqg(bfqd, blkcg);

	if (unlikely(!bfqg))
		return NULL;

	/*
	 * Update the chain of bfq_groups as we might be handling a leaf group
	 * which, along with some of its relatives, has not been hooked yet
	 * to the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		bfqg = container_of(entity, struct bfq_group, entity);
		if (bfqg != bfqd->root_group) {
			parent = bfqg_parent(bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(bfqg, parent);
		}
	}

	return bfqg;
}
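
/*
 * Concretely: if a task in cgroup /a/b/c issues its first I/O on this
 * device, the blkgs for a, b and c may already exist while the
 * corresponding bfq_group entities have never been linked into BFQ's
 * private hierarchy. The for_each_entity() walk above climbs from the
 * leaf towards the root, (re)setting each group's parent entity and
 * sched_data, so that afterwards c's entity correctly hangs off b's,
 * b's off a's, and a's off the root_group.
 */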

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one.  Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;

	/*
	 * If bfqq is empty, then bfq_bfqq_expire also invokes
	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
	 * from data structures related to the current group. Otherwise we
	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
	 * we do below.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_and_blkg_put(bfqq_group(bfqq));

	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
	/* pin down bfqg and its associated blkg */
	bfqg_and_blkg_get(bfqg);

	if (bfq_bfqq_busy(bfqq)) {
		if (unlikely(!bfqd->nonrot_with_queueing))
			bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
		bfq_schedule_dispatch(bfqd);
}

/**
 * __bfq_bic_change_cgroup - move @bic to @blkcg.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @blkcg: the blk-cgroup to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held, which makes
 * sure that the reference to the cgroup remains valid across the call
 * (see the comments in bfq_bic_update_cgroup on this issue).
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and to get a reference to it, reducing the lookup
 * time here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
						struct bfq_io_cq *bic,
						struct blkcg *blkcg)
{
	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
	struct bfq_group *bfqg;
	struct bfq_entity *entity;

	bfqg = bfq_find_set_group(bfqd, blkcg);

	if (unlikely(!bfqg))
		bfqg = bfqd->root_group;

	if (async_bfqq) {
		entity = &async_bfqq->entity;

		if (entity->sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, 0);
			bfq_log_bfqq(bfqd, async_bfqq,
				     "bic_change_group: %p %d",
				     async_bfqq, async_bfqq->ref);
			bfq_put_queue(async_bfqq);
		}
	}

	if (sync_bfqq) {
		entity = &sync_bfqq->entity;
		if (entity->sched_data != &bfqg->sched_data)
			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
	}

	return bfqg;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = NULL;
	uint64_t serial_nr;

	rcu_read_lock();
	serial_nr = __bio_blkcg(bio)->css.serial_nr;

	/*
	 * Check whether blkcg has changed.  The condition may trigger
	 * spuriously on a newly created bic but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		goto out;

	bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
	/*
	 * Update blkg_path for bfq_log_* functions. We cache this
	 * path, and update it here, for the following
	 * reasons. Operations on blkg objects in blk-cgroup are
	 * protected with the request_queue lock, and not with the
	 * lock that protects the instances of this scheduler
	 * (bfqd->lock). This exposes BFQ to the following sort of
	 * race.
	 *
	 * The blkg_lookup performed in bfq_get_queue, protected
	 * through rcu, may happen to return the address of a copy of
	 * the original blkg. If this is the case, then the
	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
	 * the blkg, is useless: it does not prevent blk-cgroup code
	 * from destroying both the original blkg and all objects
	 * directly or indirectly referred by the copy of the
	 * blkg.
	 *
	 * On the bright side, destroy operations on a blkg invoke, as
	 * a first step, hooks of the scheduler associated with the
	 * blkg. And these hooks are executed with bfqd->lock held for
	 * BFQ. As a consequence, for any blkg associated with the
	 * request queue this instance of the scheduler is attached
	 * to, we are guaranteed that such a blkg is not destroyed, and
	 * that all the pointers it contains are consistent, while we
	 * are holding bfqd->lock. A blkg_lookup performed with
	 * bfqd->lock held then returns a fully consistent blkg, which
	 * remains consistent as long as this lock is held.
	 *
	 * Thanks to the last fact, and to the fact that: (1) bfqg has
	 * been obtained through a blkg_lookup in the above
	 * assignment, and (2) bfqd->lock is being held, here we can
	 * safely use the policy data for the involved blkg (i.e., the
	 * field bfqg->pd) to get to the blkg associated with bfqg,
	 * and then we can safely use any field of blkg. After we
	 * release bfqd->lock, even just getting blkg through this
	 * bfqg may cause dangling references to be traversed, as
	 * bfqg->pd may not exist any more.
	 *
	 * In view of the above facts, here we cache, in the bfqg, any
	 * blkg data we may need for this bic, and for its associated
	 * bfq_queue. As of now, we need to cache only the path of the
	 * blkg, which is used in the bfq_log_* functions.
	 *
	 * Finally, note that bfqg itself needs to be protected from
	 * destruction on the blkg_free of the original blkg (which
	 * invokes bfq_pd_free). We use an additional private
	 * refcounter for bfqg, to let it disappear only after no
	 * bfq_queue refers to it any longer.
	 */
	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
	bic->blkcg_serial_nr = serial_nr;
out:
	rcu_read_unlock();
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity ; entity = st->first_idle)
		__bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_entities - move all active entities to the
 *                                root group.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree with the entities.
 */
static void bfq_reparent_active_entities(struct bfq_data *bfqd,
					 struct bfq_group *bfqg,
					 struct bfq_service_tree *st)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity = NULL;

	if (!RB_EMPTY_ROOT(&st->active))
		entity = bfq_entity_of(rb_first(active));

	for (; entity ; entity = bfq_entity_of(rb_first(active)))
		bfq_reparent_leaf_entity(bfqd, entity);

	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
			bfqg->sched_data.in_service_entity);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *		    and reparent its child entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic.
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_service_tree *st;
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&bfqd->lock, flags);

	if (!entity) /* root group */
		goto put_async_queues;

	/*
	 * Empty all service_trees belonging to this group before
	 * deactivating the group itself.
	 */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		/*
		 * The idle tree may still contain bfq_queues belonging
		 * to exited tasks because they never migrated to a
		 * different cgroup from the one being destroyed now.
		 */
		bfq_flush_idle_tree(st);

		/*
		 * It may happen that some queues are still active
		 * (busy) upon group destruction (if the corresponding
		 * processes have been forced to terminate). We move
		 * all the leaf entities corresponding to these queues
		 * to the root_group.
		 * Also, it may happen that the group has an entity
		 * in service, which is disconnected from the active
		 * tree: it must be moved, too.
		 * There is no need to put the sync queues, as the
		 * scheduler has taken no reference.
		 */
		bfq_reparent_active_entities(bfqd, bfqg, st);
	}

	__bfq_deactivate_entity(entity, false);

put_async_queues:
	bfq_put_async_queues(bfqd, bfqg);

	spin_unlock_irqrestore(&bfqd->lock, flags);
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
	 * that they don't get lost.  If IOs complete after this point, the
	 * stats for them will be lost.  Oh well...
	 */
	bfqg_stats_xfer_dead(bfqg);
}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	unsigned int val = 0;

	if (bfqgd)
		val = bfqgd->weight;

	seq_printf(sf, "%u\n", val);

	return 0;
}

static u64 bfqg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	if (!bfqg->entity.dev_weight)
		return 0;
	return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);

	seq_printf(sf, "default %u\n", bfqgd->weight);
	blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
			  &blkcg_policy_bfq, 0, false);
	return 0;
}

static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
{
	weight = dev_weight ?: weight;

	bfqg->entity.dev_weight = dev_weight;
	/*
	 * Setting the prio_changed flag of the entity
	 * to 1 with new_weight == weight would re-set
	 * the value of the weight to its ioprio mapping.
	 * Set the flag only if necessary.
	 */
	if ((unsigned short)weight != bfqg->entity.new_weight) {
		bfqg->entity.new_weight = (unsigned short)weight;
		/*
		 * Make sure that the above new value has been
		 * stored in bfqg->entity.new_weight before
		 * setting the prio_changed flag. In fact,
		 * this flag may be read asynchronously (in
		 * critical sections protected by a different
		 * lock than that held here), and finding this
		 * flag set may cause the execution of the code
		 * for updating parameters whose value may
		 * depend also on bfqg->entity.new_weight (in
		 * __bfq_entity_update_weight_prio).
		 * This barrier makes sure that the new value
		 * of bfqg->entity.new_weight is correctly
		 * seen in that code.
		 */
		smp_wmb();
		bfqg->entity.prio_changed = 1;
	}
}
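
/*
 * The smp_wmb() above orders the store to new_weight before the store to
 * prio_changed; for it to be effective, the reader presumably needs the
 * mirror-image ordering, i.e. a read barrier between loading prio_changed
 * and loading new_weight (the consumer of these fields is
 * __bfq_entity_update_weight_prio, in bfq-wf2q.c). A write barrier only
 * ever pairs with a read barrier on the other side.
 */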

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	struct blkcg_gq *blkg;
	int ret = -ERANGE;

	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
		return ret;

	ret = 0;
	spin_lock_irq(&blkcg->lock);
	bfqgd->weight = (unsigned short)val;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (bfqg)
			bfq_group_set_weight(bfqg, val, 0);
	}
	spin_unlock_irq(&blkcg->lock);

	return ret;
}

static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
					char *buf, size_t nbytes,
					loff_t off)
{
	int ret;
	struct blkg_conf_ctx ctx;
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct bfq_group *bfqg;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
	if (ret)
		return ret;

	if (sscanf(ctx.body, "%llu", &v) == 1) {
		/* require "default" on dfl */
		ret = -ERANGE;
		if (!v)
			goto out;
	} else if (!strcmp(strim(ctx.body), "default")) {
		v = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	bfqg = blkg_to_bfqg(ctx.blkg);

	ret = -ERANGE;
	if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
		bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
		ret = 0;
	}
out:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{
	char *endp;
	int ret;
	u64 v;

	buf = strim(buf);

	/* "WEIGHT" or "default WEIGHT" sets the default weight */
	v = simple_strtoull(buf, &endp, 0);
	if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
		ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
		return ret ?: nbytes;
	}

	return bfq_io_set_device_weight(of, buf, nbytes, off);
}
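
/*
 * Taken together, the two writers above accept, for example (the device
 * numbers are illustrative):
 *
 *	echo 200 > io.bfq.weight            - set the default weight
 *	echo "default 200" > io.bfq.weight  - same as above
 *	echo "8:16 300" > io.bfq.weight     - per-device weight for 8:16
 *	echo "8:16 default" > io.bfq.weight - drop the per-device override
 *
 * Per-device lines are parsed by blkg_conf_prep(), which resolves the
 * MAJ:MIN prefix and hands the remainder of the line to
 * bfq_io_set_device_weight() via ctx.body.
 */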

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample sum;

	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, true);
	return 0;
}
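
/*
 * The "_recursive" variants report, for each group, the sum over the whole
 * subtree rooted at that group. The aux counts filled in by
 * bfqg_stats_xfer_dead() are included in these sums, which is what keeps
 * the recursive numbers stable when a descendant group is removed.
 */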

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
	return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(&blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct bfq_stat *stat;

		if (!pos_blkg->online)
			continue;

		stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
		sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return __blkg_prfill_u64(sf, pd, sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, false);
	return 0;
}

static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
	u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
	return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample tmp;

	blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
			offsetof(struct bfq_group, stats.bytes), &tmp);

	return __blkg_prfill_u64(sf, pd,
		(tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
			  false);
	return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
		v = div64_u64(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
			  0, false);
	return 0;
}
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	int ret;

	ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
	if (ret)
		return NULL;

	return blkg_to_bfqg(bfqd->queue->root_blkg);
}

struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes		= bfq_blkg_files,
	.legacy_cftypes		= bfq_blkcg_legacy_files,

	.cpd_alloc_fn		= bfq_cpd_alloc,
	.cpd_init_fn		= bfq_cpd_init,
	.cpd_bind_fn		= bfq_cpd_init,
	.cpd_free_fn		= bfq_cpd_free,

	.pd_alloc_fn		= bfq_pd_alloc,
	.pd_init_fn		= bfq_pd_init,
	.pd_offline_fn		= bfq_pd_offline,
	.pd_free_fn		= bfq_pd_free,
	.pd_reset_stats_fn	= bfq_pd_reset_stats,
};
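
/*
 * Lifecycle summary (blkcg policy conventions; the registration itself,
 * presumably a blkcg_policy_register(&blkcg_policy_bfq) call, lives in
 * bfq-iosched.c): the cpd_* hooks manage the per-blkcg bfq_group_data,
 * while the pd_* hooks manage the per-(blkcg, device) bfq_group. For each
 * bfq_group the order is pd_alloc -> pd_init -> (pd_offline ->) pd_free,
 * with pd_offline running while the blkg is being torn down, which is why
 * the stats transfer in bfq_pd_offline() happens there rather than in
 * bfq_pd_free().
 */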

struct cftype bfq_blkcg_legacy_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight_legacy,
		.write_u64 = bfq_io_set_weight_legacy,
	},
	{
		.name = "bfq.weight_device",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},

	/* statistics, covering only the tasks in the bfqg */
	{
		.name = "bfq.io_service_bytes",
		.private = offsetof(struct bfq_group, stats.bytes),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_serviced",
		.private = offsetof(struct bfq_group, stats.ios),
		.seq_show = bfqg_print_rwstat,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.sectors",
		.seq_show = bfqg_print_stat_sectors,
	},
	{
		.name = "bfq.io_service_time",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_wait_time",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_merged",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_queued",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat,
	},
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

	/* the same statistics which cover the bfqg and its descendants */
	{
		.name = "bfq.io_service_bytes_recursive",
		.private = offsetof(struct bfq_group, stats.bytes),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_serviced_recursive",
		.private = offsetof(struct bfq_group, stats.ios),
		.seq_show = bfqg_print_rwstat_recursive,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time_recursive",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat_recursive,
	},
	{
		.name = "bfq.sectors_recursive",
		.seq_show = bfqg_print_stat_sectors_recursive,
	},
	{
		.name = "bfq.io_service_time_recursive",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_wait_time_recursive",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_merged_recursive",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_queued_recursive",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.avg_queue_size",
		.seq_show = bfqg_print_avg_queue_size,
	},
	{
		.name = "bfq.group_wait_time",
		.private = offsetof(struct bfq_group, stats.group_wait_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.idle_time",
		.private = offsetof(struct bfq_group, stats.idle_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.empty_time",
		.private = offsetof(struct bfq_group, stats.empty_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.dequeue",
		.private = offsetof(struct bfq_group, stats.dequeue),
		.seq_show = bfqg_print_stat,
	},
#endif	/* CONFIG_BFQ_CGROUP_DEBUG */
	{ }	/* terminate */
};
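
/*
 * The full set of files above is exposed only on the legacy (cgroup v1)
 * hierarchy, via .legacy_cftypes. On the unified (v2) hierarchy, BFQ
 * exposes just the weight interface below, via .dfl_cftypes; the generic
 * io.stat file provides the basic per-group I/O statistics there instead.
 */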

struct cftype bfq_blkg_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},
	{} /* terminate */
};

#else	/* CONFIG_BFQ_GROUP_IOSCHED */

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
{
	return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif	/* CONFIG_BFQ_GROUP_IOSCHED */
1413