xref: /openbmc/linux/block/blk-throttle.c (revision ec2da07c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Interface for controlling IO bandwidth on a request queue
4  *
5  * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/bio.h>
12 #include <linux/blktrace_api.h>
13 #include <linux/blk-cgroup.h>
14 #include "blk.h"
15 
16 /* Max dispatch from a group in 1 round */
17 static int throtl_grp_quantum = 8;
18 
19 /* Total max dispatch from all groups in one round */
20 static int throtl_quantum = 32;
21 
22 /* Throttling is performed over a slice and after that slice is renewed */
23 #define DFL_THROTL_SLICE_HD (HZ / 10)
24 #define DFL_THROTL_SLICE_SSD (HZ / 50)
25 #define MAX_THROTL_SLICE (HZ)
26 #define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
27 #define MIN_THROTL_BPS (320 * 1024)
28 #define MIN_THROTL_IOPS (10)
29 #define DFL_LATENCY_TARGET (-1L)
30 #define DFL_IDLE_THRESHOLD (0)
31 #define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
32 #define LATENCY_FILTERED_SSD (0)
33 /*
34  * For HD, very small latencies come from sequential IO. Such IO tells us
35  * nothing about whether it is being impacted by other IO, hence we ignore it.
36  */
37 #define LATENCY_FILTERED_HD (1000L) /* 1ms */
38 
39 static struct blkcg_policy blkcg_policy_throtl;
40 
41 /* A workqueue to queue throttle related work */
42 static struct workqueue_struct *kthrotld_workqueue;
43 
44 /*
45  * To implement hierarchical throttling, throtl_grps form a tree and bios
46  * are dispatched upwards level by level until they reach the top and get
47  * issued.  When dispatching bios from the children and local group at each
48  * level, if the bios are dispatched into a single bio_list, there's a risk
49  * that a local or child group which can queue many bios at once will fill
50  * up the list, starving the others.
51  *
52  * To avoid such starvation, dispatched bios are queued separately
53  * according to where they came from.  When they are again dispatched to
54  * the parent, they're popped in round-robin order so that no single source
55  * hogs the dispatch window.
56  *
57  * throtl_qnode is used to keep the queued bios separated by their sources.
58  * Bios are queued to throtl_qnode which in turn is queued to
59  * throtl_service_queue and then dispatched in round-robin order.
60  *
61  * It's also used to track the reference counts on blkg's.  A qnode always
62  * belongs to a throtl_grp and gets queued on itself or the parent, so
63  * incrementing the reference of the associated throtl_grp when a qnode is
64  * queued and decrementing when dequeued is enough to keep the whole blkg
65  * tree pinned while bios are in flight.
66  */
67 struct throtl_qnode {
68 	struct list_head	node;		/* service_queue->queued[] */
69 	struct bio_list		bios;		/* queued bios */
70 	struct throtl_grp	*tg;		/* tg this qnode belongs to */
71 };
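
/*
 * For example, if qnode A (a child group's qnode_on_parent) and qnode B
 * (the parent's own qnode_on_self) are both on a queued[] list, bios are
 * popped from them alternately: throtl_pop_queued() takes one bio from A
 * and rotates A to the tail, then takes one from B, and so on, so neither
 * source can monopolize the dispatch window.
 */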
72 
73 struct throtl_service_queue {
74 	struct throtl_service_queue *parent_sq;	/* the parent service_queue */
75 
76 	/*
77 	 * Bios queued directly to this service_queue or dispatched from
78 	 * children throtl_grp's.
79 	 */
80 	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
81 	unsigned int		nr_queued[2];	/* number of queued bios */
82 
83 	/*
84 	 * RB tree of active children throtl_grp's, which are sorted by
85 	 * their ->disptime.
86 	 */
87 	struct rb_root_cached	pending_tree;	/* RB tree of active tgs */
88 	unsigned int		nr_pending;	/* # queued in the tree */
89 	unsigned long		first_pending_disptime;	/* disptime of the first tg */
90 	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
91 };
92 
93 enum tg_state_flags {
94 	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
95 	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
96 };
97 
98 #define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
99 
100 enum {
101 	LIMIT_LOW,
102 	LIMIT_MAX,
103 	LIMIT_CNT,
104 };
105 
106 struct throtl_grp {
107 	/* must be the first member */
108 	struct blkg_policy_data pd;
109 
110 	/* active throtl group service_queue member */
111 	struct rb_node rb_node;
112 
113 	/* throtl_data this group belongs to */
114 	struct throtl_data *td;
115 
116 	/* this group's service queue */
117 	struct throtl_service_queue service_queue;
118 
119 	/*
120 	 * qnode_on_self is used when bios are directly queued to this
121 	 * throtl_grp so that local bios compete fairly with bios
122 	 * dispatched from children.  qnode_on_parent is used when bios are
123 	 * dispatched from this throtl_grp into its parent and will compete
124 	 * with the sibling qnode_on_parents and the parent's
125 	 * qnode_on_self.
126 	 */
127 	struct throtl_qnode qnode_on_self[2];
128 	struct throtl_qnode qnode_on_parent[2];
129 
130 	/*
131 	 * Dispatch time in jiffies. This is the estimated time when group
132 	 * will unthrottle and is ready to dispatch more bio. It is used as
133 	 * key to sort active groups in service tree.
134 	 */
135 	unsigned long disptime;
136 
137 	unsigned int flags;
138 
139 	/* are there any throtl rules between this group and td? */
140 	bool has_rules[2];
141 
142 	/* internally used bytes per second rate limits */
143 	uint64_t bps[2][LIMIT_CNT];
144 	/* user configured bps limits */
145 	uint64_t bps_conf[2][LIMIT_CNT];
146 
147 	/* internally used IOPS limits */
148 	unsigned int iops[2][LIMIT_CNT];
149 	/* user configured IOPS limits */
150 	unsigned int iops_conf[2][LIMIT_CNT];
151 
152 	/* Number of bytes dispatched in current slice */
153 	uint64_t bytes_disp[2];
154 	/* Number of bio's dispatched in current slice */
155 	unsigned int io_disp[2];
156 
157 	unsigned long last_low_overflow_time[2];
158 
159 	uint64_t last_bytes_disp[2];
160 	unsigned int last_io_disp[2];
161 
162 	unsigned long last_check_time;
163 
164 	unsigned long latency_target; /* us */
165 	unsigned long latency_target_conf; /* us */
166 	/* When did we start a new slice */
167 	unsigned long slice_start[2];
168 	unsigned long slice_end[2];
169 
170 	unsigned long last_finish_time; /* ns / 1024 */
171 	unsigned long checked_last_finish_time; /* ns / 1024 */
172 	unsigned long avg_idletime; /* ns / 1024 */
173 	unsigned long idletime_threshold; /* us */
174 	unsigned long idletime_threshold_conf; /* us */
175 
176 	unsigned int bio_cnt; /* total bios */
177 	unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
178 	unsigned long bio_cnt_reset_time;
179 };
180 
181 /* We measure latency for request size from <= 4k to >= 1M */
182 #define LATENCY_BUCKET_SIZE 9
183 
184 struct latency_bucket {
185 	unsigned long total_latency; /* ns / 1024 */
186 	int samples;
187 };
188 
189 struct avg_latency_bucket {
190 	unsigned long latency; /* ns / 1024 */
191 	bool valid;
192 };
193 
194 struct throtl_data
195 {
196 	/* service tree for active throtl groups */
197 	struct throtl_service_queue service_queue;
198 
199 	struct request_queue *queue;
200 
201 	/* Total Number of queued bios on READ and WRITE lists */
202 	unsigned int nr_queued[2];
203 
204 	unsigned int throtl_slice;
205 
206 	/* Work for dispatching throttled bios */
207 	struct work_struct dispatch_work;
208 	unsigned int limit_index;
209 	bool limit_valid[LIMIT_CNT];
210 
211 	unsigned long low_upgrade_time;
212 	unsigned long low_downgrade_time;
213 
214 	unsigned int scale;
215 
216 	struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
217 	struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
218 	struct latency_bucket __percpu *latency_buckets[2];
219 	unsigned long last_calculate_time;
220 	unsigned long filtered_latency;
221 
222 	bool track_bio_latency;
223 };
224 
225 static void throtl_pending_timer_fn(struct timer_list *t);
226 
227 static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
228 {
229 	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
230 }
231 
232 static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
233 {
234 	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
235 }
236 
237 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
238 {
239 	return pd_to_blkg(&tg->pd);
240 }
241 
242 /**
243  * sq_to_tg - return the throtl_grp the specified service queue belongs to
244  * @sq: the throtl_service_queue of interest
245  *
246  * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
247  * embedded in throtl_data, %NULL is returned.
248  */
249 static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
250 {
251 	if (sq && sq->parent_sq)
252 		return container_of(sq, struct throtl_grp, service_queue);
253 	else
254 		return NULL;
255 }
256 
257 /**
258  * sq_to_td - return throtl_data the specified service queue belongs to
259  * @sq: the throtl_service_queue of interest
260  *
261  * A service_queue can be embedded in either a throtl_grp or throtl_data.
262  * Determine the associated throtl_data accordingly and return it.
263  */
264 static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
265 {
266 	struct throtl_grp *tg = sq_to_tg(sq);
267 
268 	if (tg)
269 		return tg->td;
270 	else
271 		return container_of(sq, struct throtl_data, service_queue);
272 }
273 
274 /*
275  * cgroup's limit in LIMIT_MAX is scaled if low limit is set. This scale is to
276  * make the IO dispatch smoother.
277  * Scale up: linearly scale up according to elapsed time since upgrade. For
278  *           every throtl_slice, the limit scales up by 1/2 of the .low limit
279  *           until it hits the .max limit
280  * Scale down: exponentially scale down if a cgroup doesn't hit its .low limit
281  */
282 static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
283 {
284 	/* arbitrary value to avoid too big scale */
285 	if (td->scale < 4096 && time_after_eq(jiffies,
286 	    td->low_upgrade_time + td->scale * td->throtl_slice))
287 		td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;
288 
289 	return low + (low >> 1) * td->scale;
290 }
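
/*
 * For illustration: with a .low of 100 MB/s and a .max of 400 MB/s, one
 * throtl_slice after an upgrade td->scale becomes 1 and the adjusted limit
 * is 100 + 50 * 1 = 150 MB/s; after three slices it is 100 + 50 * 3 =
 * 250 MB/s, growing linearly until tg_bps_limit() clamps it at the
 * 400 MB/s .max.
 */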
291 
292 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
293 {
294 	struct blkcg_gq *blkg = tg_to_blkg(tg);
295 	struct throtl_data *td;
296 	uint64_t ret;
297 
298 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
299 		return U64_MAX;
300 
301 	td = tg->td;
302 	ret = tg->bps[rw][td->limit_index];
303 	if (ret == 0 && td->limit_index == LIMIT_LOW) {
304 		/* intermediate node or iops isn't 0 */
305 		if (!list_empty(&blkg->blkcg->css.children) ||
306 		    tg->iops[rw][td->limit_index])
307 			return U64_MAX;
308 		else
309 			return MIN_THROTL_BPS;
310 	}
311 
312 	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
313 	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
314 		uint64_t adjusted;
315 
316 		adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
317 		ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
318 	}
319 	return ret;
320 }
321 
322 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
323 {
324 	struct blkcg_gq *blkg = tg_to_blkg(tg);
325 	struct throtl_data *td;
326 	unsigned int ret;
327 
328 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
329 		return UINT_MAX;
330 
331 	td = tg->td;
332 	ret = tg->iops[rw][td->limit_index];
333 	if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
334 		/* intermediate node or bps isn't 0 */
335 		if (!list_empty(&blkg->blkcg->css.children) ||
336 		    tg->bps[rw][td->limit_index])
337 			return UINT_MAX;
338 		else
339 			return MIN_THROTL_IOPS;
340 	}
341 
342 	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
343 	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
344 		uint64_t adjusted;
345 
346 		adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
347 		if (adjusted > UINT_MAX)
348 			adjusted = UINT_MAX;
349 		ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
350 	}
351 	return ret;
352 }
353 
354 #define request_bucket_index(sectors) \
355 	clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
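
/*
 * For example, a 4k request is 8 sectors, so order_base_2(8) - 3 = 0 and it
 * lands in the first bucket; a 1M request is 2048 sectors, so
 * order_base_2(2048) - 3 = 8, the last of the LATENCY_BUCKET_SIZE buckets.
 * Anything smaller or larger is clamped into the first or last bucket.
 */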
356 
357 /**
358  * throtl_log - log debug message via blktrace
359  * @sq: the service_queue being reported
360  * @fmt: printf format string
361  * @args: printf args
362  *
363  * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
364  * throtl_grp; otherwise, just "throtl".
365  */
366 #define throtl_log(sq, fmt, args...)	do {				\
367 	struct throtl_grp *__tg = sq_to_tg((sq));			\
368 	struct throtl_data *__td = sq_to_td((sq));			\
369 									\
370 	(void)__td;							\
371 	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
372 		break;							\
373 	if ((__tg)) {							\
374 		blk_add_cgroup_trace_msg(__td->queue,			\
375 			tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
376 	} else {							\
377 		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
378 	}								\
379 } while (0)
380 
381 static inline unsigned int throtl_bio_data_size(struct bio *bio)
382 {
383 	/* assume it's one sector */
384 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
385 		return 512;
386 	return bio->bi_iter.bi_size;
387 }
388 
389 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
390 {
391 	INIT_LIST_HEAD(&qn->node);
392 	bio_list_init(&qn->bios);
393 	qn->tg = tg;
394 }
395 
396 /**
397  * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
398  * @bio: bio being added
399  * @qn: qnode to add bio to
400  * @queued: the service_queue->queued[] list @qn belongs to
401  *
402  * Add @bio to @qn and put @qn on @queued if it's not already on the list.
403  * @qn->tg's reference count is bumped when @qn is activated.  See the
404  * comment on top of throtl_qnode definition for details.
405  */
406 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
407 				 struct list_head *queued)
408 {
409 	bio_list_add(&qn->bios, bio);
410 	if (list_empty(&qn->node)) {
411 		list_add_tail(&qn->node, queued);
412 		blkg_get(tg_to_blkg(qn->tg));
413 	}
414 }
415 
416 /**
417  * throtl_peek_queued - peek the first bio on a qnode list
418  * @queued: the qnode list to peek
419  */
420 static struct bio *throtl_peek_queued(struct list_head *queued)
421 {
422 	struct throtl_qnode *qn;
423 	struct bio *bio;
424 
425 	if (list_empty(queued))
426 		return NULL;
427 	qn = list_first_entry(queued, struct throtl_qnode, node);
428 	bio = bio_list_peek(&qn->bios);
429 	WARN_ON_ONCE(!bio);
430 	return bio;
431 }
432 
433 /**
434  * throtl_pop_queued - pop the first bio from a qnode list
435  * @queued: the qnode list to pop a bio from
436  * @tg_to_put: optional out argument for throtl_grp to put
437  *
438  * Pop the first bio from the qnode list @queued.  After popping, the first
439  * qnode is removed from @queued if empty or moved to the end of @queued so
440  * that the popping order is round-robin.
441  *
442  * When the first qnode is removed, its associated throtl_grp should be put
443  * too.  If @tg_to_put is NULL, this function automatically puts it;
444  * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
445  * responsible for putting it.
446  */
447 static struct bio *throtl_pop_queued(struct list_head *queued,
448 				     struct throtl_grp **tg_to_put)
449 {
450 	struct throtl_qnode *qn;
451 	struct bio *bio;
452 
453 	if (list_empty(queued))
454 		return NULL;
455 	qn = list_first_entry(queued, struct throtl_qnode, node);
456 	bio = bio_list_pop(&qn->bios);
457 	WARN_ON_ONCE(!bio);
458 
459 	if (bio_list_empty(&qn->bios)) {
460 		list_del_init(&qn->node);
461 		if (tg_to_put)
462 			*tg_to_put = qn->tg;
463 		else
464 			blkg_put(tg_to_blkg(qn->tg));
465 	} else {
466 		list_move_tail(&qn->node, queued);
467 	}
468 
469 	return bio;
470 }
471 
472 /* init a service_queue, assumes the caller zeroed it */
473 static void throtl_service_queue_init(struct throtl_service_queue *sq)
474 {
475 	INIT_LIST_HEAD(&sq->queued[0]);
476 	INIT_LIST_HEAD(&sq->queued[1]);
477 	sq->pending_tree = RB_ROOT_CACHED;
478 	timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
479 }
480 
481 static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
482 {
483 	struct throtl_grp *tg;
484 	int rw;
485 
486 	tg = kzalloc_node(sizeof(*tg), gfp, node);
487 	if (!tg)
488 		return NULL;
489 
490 	throtl_service_queue_init(&tg->service_queue);
491 
492 	for (rw = READ; rw <= WRITE; rw++) {
493 		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
494 		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
495 	}
496 
497 	RB_CLEAR_NODE(&tg->rb_node);
498 	tg->bps[READ][LIMIT_MAX] = U64_MAX;
499 	tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
500 	tg->iops[READ][LIMIT_MAX] = UINT_MAX;
501 	tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
502 	tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
503 	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
504 	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
505 	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
506 	/* LIMIT_LOW will have default value 0 */
507 
508 	tg->latency_target = DFL_LATENCY_TARGET;
509 	tg->latency_target_conf = DFL_LATENCY_TARGET;
510 	tg->idletime_threshold = DFL_IDLE_THRESHOLD;
511 	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
512 
513 	return &tg->pd;
514 }
515 
516 static void throtl_pd_init(struct blkg_policy_data *pd)
517 {
518 	struct throtl_grp *tg = pd_to_tg(pd);
519 	struct blkcg_gq *blkg = tg_to_blkg(tg);
520 	struct throtl_data *td = blkg->q->td;
521 	struct throtl_service_queue *sq = &tg->service_queue;
522 
523 	/*
524 	 * If on the default hierarchy, we switch to properly hierarchical
525 	 * behavior where limits on a given throtl_grp are applied to the
526 	 * whole subtree rather than just the group itself.  e.g. If 16M
527 	 * read_bps limit is set on the root group, the whole system can't
528 	 * exceed 16M for the device.
529 	 *
530 	 * If not on the default hierarchy, the broken flat hierarchy
531 	 * behavior is retained where all throtl_grps are treated as if
532 	 * they're all separate root groups right below throtl_data.
533 	 * Limits of a group don't interact with limits of other groups
534 	 * regardless of the position of the group in the hierarchy.
535 	 */
536 	sq->parent_sq = &td->service_queue;
537 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
538 		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
539 	tg->td = td;
540 }
541 
542 /*
543  * Set has_rules[] if @tg or any of its parents have limits configured.
544  * This doesn't require walking up to the top of the hierarchy as the
545  * parent's has_rules[] is guaranteed to be correct.
546  */
547 static void tg_update_has_rules(struct throtl_grp *tg)
548 {
549 	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
550 	struct throtl_data *td = tg->td;
551 	int rw;
552 
553 	for (rw = READ; rw <= WRITE; rw++)
554 		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
555 			(td->limit_valid[td->limit_index] &&
556 			 (tg_bps_limit(tg, rw) != U64_MAX ||
557 			  tg_iops_limit(tg, rw) != UINT_MAX));
558 }
559 
560 static void throtl_pd_online(struct blkg_policy_data *pd)
561 {
562 	struct throtl_grp *tg = pd_to_tg(pd);
563 	/*
564 	 * We don't want new groups to escape the limits of their ancestors.
565 	 * Update has_rules[] after a new group is brought online.
566 	 */
567 	tg_update_has_rules(tg);
568 }
569 
570 static void blk_throtl_update_limit_valid(struct throtl_data *td)
571 {
572 	struct cgroup_subsys_state *pos_css;
573 	struct blkcg_gq *blkg;
574 	bool low_valid = false;
575 
576 	rcu_read_lock();
577 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
578 		struct throtl_grp *tg = blkg_to_tg(blkg);
579 
580 		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
581 		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
582 			low_valid = true;
583 			break;
584 		}
585 	}
586 	rcu_read_unlock();
587 
588 	td->limit_valid[LIMIT_LOW] = low_valid;
589 }
590 
591 static void throtl_upgrade_state(struct throtl_data *td);
592 static void throtl_pd_offline(struct blkg_policy_data *pd)
593 {
594 	struct throtl_grp *tg = pd_to_tg(pd);
595 
596 	tg->bps[READ][LIMIT_LOW] = 0;
597 	tg->bps[WRITE][LIMIT_LOW] = 0;
598 	tg->iops[READ][LIMIT_LOW] = 0;
599 	tg->iops[WRITE][LIMIT_LOW] = 0;
600 
601 	blk_throtl_update_limit_valid(tg->td);
602 
603 	if (!tg->td->limit_valid[tg->td->limit_index])
604 		throtl_upgrade_state(tg->td);
605 }
606 
607 static void throtl_pd_free(struct blkg_policy_data *pd)
608 {
609 	struct throtl_grp *tg = pd_to_tg(pd);
610 
611 	del_timer_sync(&tg->service_queue.pending_timer);
612 	kfree(tg);
613 }
614 
615 static struct throtl_grp *
616 throtl_rb_first(struct throtl_service_queue *parent_sq)
617 {
618 	struct rb_node *n;
619 	/* Service tree is empty */
620 	if (!parent_sq->nr_pending)
621 		return NULL;
622 
623 	n = rb_first_cached(&parent_sq->pending_tree);
624 	WARN_ON_ONCE(!n);
625 	if (!n)
626 		return NULL;
627 	return rb_entry_tg(n);
628 }
629 
630 static void throtl_rb_erase(struct rb_node *n,
631 			    struct throtl_service_queue *parent_sq)
632 {
633 	rb_erase_cached(n, &parent_sq->pending_tree);
634 	RB_CLEAR_NODE(n);
635 	--parent_sq->nr_pending;
636 }
637 
638 static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
639 {
640 	struct throtl_grp *tg;
641 
642 	tg = throtl_rb_first(parent_sq);
643 	if (!tg)
644 		return;
645 
646 	parent_sq->first_pending_disptime = tg->disptime;
647 }
648 
649 static void tg_service_queue_add(struct throtl_grp *tg)
650 {
651 	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
652 	struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
653 	struct rb_node *parent = NULL;
654 	struct throtl_grp *__tg;
655 	unsigned long key = tg->disptime;
656 	bool leftmost = true;
657 
658 	while (*node != NULL) {
659 		parent = *node;
660 		__tg = rb_entry_tg(parent);
661 
662 		if (time_before(key, __tg->disptime))
663 			node = &parent->rb_left;
664 		else {
665 			node = &parent->rb_right;
666 			leftmost = false;
667 		}
668 	}
669 
670 	rb_link_node(&tg->rb_node, parent, node);
671 	rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
672 			       leftmost);
673 }
674 
675 static void __throtl_enqueue_tg(struct throtl_grp *tg)
676 {
677 	tg_service_queue_add(tg);
678 	tg->flags |= THROTL_TG_PENDING;
679 	tg->service_queue.parent_sq->nr_pending++;
680 }
681 
682 static void throtl_enqueue_tg(struct throtl_grp *tg)
683 {
684 	if (!(tg->flags & THROTL_TG_PENDING))
685 		__throtl_enqueue_tg(tg);
686 }
687 
688 static void __throtl_dequeue_tg(struct throtl_grp *tg)
689 {
690 	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
691 	tg->flags &= ~THROTL_TG_PENDING;
692 }
693 
694 static void throtl_dequeue_tg(struct throtl_grp *tg)
695 {
696 	if (tg->flags & THROTL_TG_PENDING)
697 		__throtl_dequeue_tg(tg);
698 }
699 
700 /* Call with queue lock held */
701 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
702 					  unsigned long expires)
703 {
704 	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
705 
706 	/*
707 	 * Since we are adjusting the throttle limit dynamically, the sleep
708 	 * time calculated according to the previous limit might be invalid.
709 	 * It's possible the cgroup sleep time is very long and no other
710 	 * cgroup has IO running to notice the limit change. Make sure the
711 	 * cgroup doesn't sleep too long so that a limit change isn't missed.
712 	 */
713 	if (time_after(expires, max_expire))
714 		expires = max_expire;
715 	mod_timer(&sq->pending_timer, expires);
716 	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
717 		   expires - jiffies, jiffies);
718 }
719 
720 /**
721  * throtl_schedule_next_dispatch - schedule the next dispatch cycle
722  * @sq: the service_queue to schedule dispatch for
723  * @force: force scheduling
724  *
725  * Arm @sq->pending_timer so that the next dispatch cycle starts on the
726  * dispatch time of the first pending child.  Returns %true if either timer
727  * is armed or there's no pending child left.  %false if the current
728  * dispatch window is still open and the caller should continue
729  * dispatching.
730  *
731  * If @force is %true, the dispatch timer is always scheduled and this
732  * function is guaranteed to return %true.  This is to be used when the
733  * caller can't dispatch itself and needs to invoke pending_timer
734  * unconditionally.  Note that forced scheduling is likely to induce short
735  * delay before dispatch starts even if @sq->first_pending_disptime is not
736  * in the future and thus shouldn't be used in hot paths.
737  */
738 static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
739 					  bool force)
740 {
741 	/* any pending children left? */
742 	if (!sq->nr_pending)
743 		return true;
744 
745 	update_min_dispatch_time(sq);
746 
747 	/* is the next dispatch time in the future? */
748 	if (force || time_after(sq->first_pending_disptime, jiffies)) {
749 		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
750 		return true;
751 	}
752 
753 	/* tell the caller to continue dispatching */
754 	return false;
755 }
756 
757 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
758 		bool rw, unsigned long start)
759 {
760 	tg->bytes_disp[rw] = 0;
761 	tg->io_disp[rw] = 0;
762 
763 	/*
764 	 * Previous slice has expired. We must have trimmed it after last
765 	 * bio dispatch. That means since start of last slice, we never used
766 	 * that bandwidth. Do try to make use of that bandwidth while giving
767 	 * credit.
768 	 */
769 	if (time_after_eq(start, tg->slice_start[rw]))
770 		tg->slice_start[rw] = start;
771 
772 	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
773 	throtl_log(&tg->service_queue,
774 		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
775 		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
776 		   tg->slice_end[rw], jiffies);
777 }
778 
779 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
780 {
781 	tg->bytes_disp[rw] = 0;
782 	tg->io_disp[rw] = 0;
783 	tg->slice_start[rw] = jiffies;
784 	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
785 	throtl_log(&tg->service_queue,
786 		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
787 		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
788 		   tg->slice_end[rw], jiffies);
789 }
790 
791 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
792 					unsigned long jiffy_end)
793 {
794 	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
795 }
796 
797 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
798 				       unsigned long jiffy_end)
799 {
800 	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
801 	throtl_log(&tg->service_queue,
802 		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
803 		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
804 		   tg->slice_end[rw], jiffies);
805 }
806 
807 /* Determine if previously allocated or extended slice is complete or not */
808 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
809 {
810 	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
811 		return false;
812 
813 	return true;
814 }
815 
816 /* Trim the used slices and adjust slice start accordingly */
817 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
818 {
819 	unsigned long nr_slices, time_elapsed, io_trim;
820 	u64 bytes_trim, tmp;
821 
822 	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
823 
824 	/*
825 	 * If bps are unlimited (-1), then the time slice doesn't get
826 	 * renewed. Don't try to trim the slice if the slice is used. A new
827 	 * slice will start when appropriate.
828 	 */
829 	if (throtl_slice_used(tg, rw))
830 		return;
831 
832 	/*
833 	 * A bio has been dispatched. Also adjust slice_end. It might happen
834 	 * that initially the cgroup limit was very low, resulting in a high
835 	 * slice_end, but later the limit was bumped up and the bio was
836 	 * dispatched sooner, then we need to reduce slice_end. A high bogus
837 	 * slice_end is bad because it does not allow a new slice to start.
838 	 */
839 
840 	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
841 
842 	time_elapsed = jiffies - tg->slice_start[rw];
843 
844 	nr_slices = time_elapsed / tg->td->throtl_slice;
845 
846 	if (!nr_slices)
847 		return;
848 	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
849 	do_div(tmp, HZ);
850 	bytes_trim = tmp;
851 
852 	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
853 		HZ;
854 
855 	if (!bytes_trim && !io_trim)
856 		return;
857 
858 	if (tg->bytes_disp[rw] >= bytes_trim)
859 		tg->bytes_disp[rw] -= bytes_trim;
860 	else
861 		tg->bytes_disp[rw] = 0;
862 
863 	if (tg->io_disp[rw] >= io_trim)
864 		tg->io_disp[rw] -= io_trim;
865 	else
866 		tg->io_disp[rw] = 0;
867 
868 	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
869 
870 	throtl_log(&tg->service_queue,
871 		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
872 		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
873 		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
874 }
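
/*
 * A rough example of the trim above, assuming the default HD throtl_slice
 * of HZ / 10 and a 1048576 bytes/s bps limit: if three full slices have
 * elapsed since slice_start, nr_slices = 3 and bytes_trim =
 * 1048576 * (HZ / 10) * 3 / HZ = 314572 bytes, i.e. three slices' worth of
 * budget is subtracted from bytes_disp while slice_start advances by
 * three slices.
 */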
875 
876 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
877 				  unsigned long *wait)
878 {
879 	bool rw = bio_data_dir(bio);
880 	unsigned int io_allowed;
881 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
882 	u64 tmp;
883 
884 	jiffy_elapsed = jiffies - tg->slice_start[rw];
885 
886 	/* Round up to the next throttle slice, wait time must be nonzero */
887 	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
888 
889 	/*
890 	 * jiffy_elapsed_rnd should not be a big value as minimum iops can be
891 	 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
892 	 * will allow dispatch after 1 second and after that slice should
893 	 * have been trimmed.
894 	 */
895 
896 	tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
897 	do_div(tmp, HZ);
898 
899 	if (tmp > UINT_MAX)
900 		io_allowed = UINT_MAX;
901 	else
902 		io_allowed = tmp;
903 
904 	if (tg->io_disp[rw] + 1 <= io_allowed) {
905 		if (wait)
906 			*wait = 0;
907 		return true;
908 	}
909 
910 	/* Calc approx time to dispatch */
911 	jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
912 
913 	if (wait)
914 		*wait = jiffy_wait;
915 	return false;
916 }
917 
918 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
919 				 unsigned long *wait)
920 {
921 	bool rw = bio_data_dir(bio);
922 	u64 bytes_allowed, extra_bytes, tmp;
923 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
924 	unsigned int bio_size = throtl_bio_data_size(bio);
925 
926 	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
927 
928 	/* Slice has just started. Consider one slice interval */
929 	if (!jiffy_elapsed)
930 		jiffy_elapsed_rnd = tg->td->throtl_slice;
931 
932 	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
933 
934 	tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
935 	do_div(tmp, HZ);
936 	bytes_allowed = tmp;
937 
938 	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
939 		if (wait)
940 			*wait = 0;
941 		return true;
942 	}
943 
944 	/* Calc approx time to dispatch */
945 	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
946 	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
947 
948 	if (!jiffy_wait)
949 		jiffy_wait = 1;
950 
951 	/*
952 	 * This wait time doesn't take into consideration the rounding up we
953 	 * did. Add that time too.
954 	 */
955 	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
956 	if (wait)
957 		*wait = jiffy_wait;
958 	return false;
959 }
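
/*
 * A small worked example of the wait calculation above, assuming a bps
 * limit of 1048576 bytes/s: if the next bio overshoots bytes_allowed for
 * the rounded-up window by extra_bytes = 65536, then jiffy_wait =
 * 65536 * HZ / 1048576 = HZ / 16, i.e. roughly 62 ms worth of jiffies
 * (but at least 1), plus whatever rounding slack was added to
 * jiffy_elapsed_rnd.
 */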
960 
961 /*
962  * Returns whether one can dispatch a bio or not. Also returns approx number
963  * of jiffies to wait before this bio is within the IO rate and can be dispatched
964  */
965 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
966 			    unsigned long *wait)
967 {
968 	bool rw = bio_data_dir(bio);
969 	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
970 
971 	/*
972 	 * Currently the whole state machine of the group depends on the first
973 	 * bio queued in the group's bio list. So one should not be calling
974 	 * this function with a different bio if there are other bios
975 	 * queued.
976 	 */
977 	BUG_ON(tg->service_queue.nr_queued[rw] &&
978 	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
979 
980 	/* If tg->bps = -1, then BW is unlimited */
981 	if (tg_bps_limit(tg, rw) == U64_MAX &&
982 	    tg_iops_limit(tg, rw) == UINT_MAX) {
983 		if (wait)
984 			*wait = 0;
985 		return true;
986 	}
987 
988 	/*
989 	 * If the previous slice expired, start a new one; otherwise renew/extend
990 	 * the existing slice to make sure it is at least throtl_slice interval
991 	 * long since now. A new slice is started only for an empty throttle
992 	 * group. If there is a queued bio, that means there should be an active
993 	 * slice and it should be extended instead.
994 	 */
995 	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
996 		throtl_start_new_slice(tg, rw);
997 	else {
998 		if (time_before(tg->slice_end[rw],
999 		    jiffies + tg->td->throtl_slice))
1000 			throtl_extend_slice(tg, rw,
1001 				jiffies + tg->td->throtl_slice);
1002 	}
1003 
1004 	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
1005 	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
1006 		if (wait)
1007 			*wait = 0;
1008 		return true;
1009 	}
1010 
1011 	max_wait = max(bps_wait, iops_wait);
1012 
1013 	if (wait)
1014 		*wait = max_wait;
1015 
1016 	if (time_before(tg->slice_end[rw], jiffies + max_wait))
1017 		throtl_extend_slice(tg, rw, jiffies + max_wait);
1018 
1019 	return false;
1020 }
1021 
1022 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
1023 {
1024 	bool rw = bio_data_dir(bio);
1025 	unsigned int bio_size = throtl_bio_data_size(bio);
1026 
1027 	/* Charge the bio to the group */
1028 	tg->bytes_disp[rw] += bio_size;
1029 	tg->io_disp[rw]++;
1030 	tg->last_bytes_disp[rw] += bio_size;
1031 	tg->last_io_disp[rw]++;
1032 
1033 	/*
1034 	 * BIO_THROTTLED is used to prevent the same bio from being throttled
1035 	 * more than once, as a throttled bio will go through blk-throtl a
1036 	 * second time when it eventually gets issued.  Set it when a bio
1037 	 * is being charged to a tg.
1038 	 */
1039 	if (!bio_flagged(bio, BIO_THROTTLED))
1040 		bio_set_flag(bio, BIO_THROTTLED);
1041 }
1042 
1043 /**
1044  * throtl_add_bio_tg - add a bio to the specified throtl_grp
1045  * @bio: bio to add
1046  * @qn: qnode to use
1047  * @tg: the target throtl_grp
1048  *
1049  * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
1050  * tg->qnode_on_self[] is used.
1051  */
1052 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
1053 			      struct throtl_grp *tg)
1054 {
1055 	struct throtl_service_queue *sq = &tg->service_queue;
1056 	bool rw = bio_data_dir(bio);
1057 
1058 	if (!qn)
1059 		qn = &tg->qnode_on_self[rw];
1060 
1061 	/*
1062 	 * If @tg doesn't currently have any bios queued in the same
1063 	 * direction, queueing @bio can change when @tg should be
1064 	 * dispatched.  Mark that @tg was empty.  This is automatically
1065 	 * cleaered on the next tg_update_disptime().
1066 	 */
1067 	if (!sq->nr_queued[rw])
1068 		tg->flags |= THROTL_TG_WAS_EMPTY;
1069 
1070 	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
1071 
1072 	sq->nr_queued[rw]++;
1073 	throtl_enqueue_tg(tg);
1074 }
1075 
1076 static void tg_update_disptime(struct throtl_grp *tg)
1077 {
1078 	struct throtl_service_queue *sq = &tg->service_queue;
1079 	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
1080 	struct bio *bio;
1081 
1082 	bio = throtl_peek_queued(&sq->queued[READ]);
1083 	if (bio)
1084 		tg_may_dispatch(tg, bio, &read_wait);
1085 
1086 	bio = throtl_peek_queued(&sq->queued[WRITE]);
1087 	if (bio)
1088 		tg_may_dispatch(tg, bio, &write_wait);
1089 
1090 	min_wait = min(read_wait, write_wait);
1091 	disptime = jiffies + min_wait;
1092 
1093 	/* Update dispatch time */
1094 	throtl_dequeue_tg(tg);
1095 	tg->disptime = disptime;
1096 	throtl_enqueue_tg(tg);
1097 
1098 	/* see throtl_add_bio_tg() */
1099 	tg->flags &= ~THROTL_TG_WAS_EMPTY;
1100 }
1101 
1102 static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
1103 					struct throtl_grp *parent_tg, bool rw)
1104 {
1105 	if (throtl_slice_used(parent_tg, rw)) {
1106 		throtl_start_new_slice_with_credit(parent_tg, rw,
1107 				child_tg->slice_start[rw]);
1108 	}
1109 
1110 }
1111 
1112 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1113 {
1114 	struct throtl_service_queue *sq = &tg->service_queue;
1115 	struct throtl_service_queue *parent_sq = sq->parent_sq;
1116 	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
1117 	struct throtl_grp *tg_to_put = NULL;
1118 	struct bio *bio;
1119 
1120 	/*
1121 	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
1122 	 * from @tg may put its reference and @parent_sq might end up
1123 	 * getting released prematurely.  Remember the tg to put and put it
1124 	 * after @bio is transferred to @parent_sq.
1125 	 */
1126 	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
1127 	sq->nr_queued[rw]--;
1128 
1129 	throtl_charge_bio(tg, bio);
1130 
1131 	/*
1132 	 * If our parent is another tg, we just need to transfer @bio to
1133 	 * the parent using throtl_add_bio_tg().  If our parent is
1134 	 * @td->service_queue, @bio is ready to be issued.  Put it on its
1135 	 * bio_lists[] and decrease total number queued.  The caller is
1136 	 * responsible for issuing these bios.
1137 	 */
1138 	if (parent_tg) {
1139 		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1140 		start_parent_slice_with_credit(tg, parent_tg, rw);
1141 	} else {
1142 		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1143 				     &parent_sq->queued[rw]);
1144 		BUG_ON(tg->td->nr_queued[rw] <= 0);
1145 		tg->td->nr_queued[rw]--;
1146 	}
1147 
1148 	throtl_trim_slice(tg, rw);
1149 
1150 	if (tg_to_put)
1151 		blkg_put(tg_to_blkg(tg_to_put));
1152 }
1153 
1154 static int throtl_dispatch_tg(struct throtl_grp *tg)
1155 {
1156 	struct throtl_service_queue *sq = &tg->service_queue;
1157 	unsigned int nr_reads = 0, nr_writes = 0;
1158 	unsigned int max_nr_reads = throtl_grp_quantum * 3 / 4;
1159 	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
1160 	struct bio *bio;
1161 
1162 	/* Try to dispatch 75% READS and 25% WRITES */
1163 
1164 	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1165 	       tg_may_dispatch(tg, bio, NULL)) {
1166 
1167 		tg_dispatch_one_bio(tg, bio_data_dir(bio));
1168 		nr_reads++;
1169 
1170 		if (nr_reads >= max_nr_reads)
1171 			break;
1172 	}
1173 
1174 	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1175 	       tg_may_dispatch(tg, bio, NULL)) {
1176 
1177 		tg_dispatch_one_bio(tg, bio_data_dir(bio));
1178 		nr_writes++;
1179 
1180 		if (nr_writes >= max_nr_writes)
1181 			break;
1182 	}
1183 
1184 	return nr_reads + nr_writes;
1185 }
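
/*
 * With the default throtl_grp_quantum of 8, the split above works out to
 * max_nr_reads = 8 * 3 / 4 = 6 and max_nr_writes = 8 - 6 = 2, i.e. at most
 * six reads and two writes are moved toward the parent per call.
 */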
1186 
1187 static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
1188 {
1189 	unsigned int nr_disp = 0;
1190 
1191 	while (1) {
1192 		struct throtl_grp *tg = throtl_rb_first(parent_sq);
1193 		struct throtl_service_queue *sq;
1194 
1195 		if (!tg)
1196 			break;
1197 
1198 		if (time_before(jiffies, tg->disptime))
1199 			break;
1200 
1201 		throtl_dequeue_tg(tg);
1202 
1203 		nr_disp += throtl_dispatch_tg(tg);
1204 
1205 		sq = &tg->service_queue;
1206 		if (sq->nr_queued[0] || sq->nr_queued[1])
1207 			tg_update_disptime(tg);
1208 
1209 		if (nr_disp >= throtl_quantum)
1210 			break;
1211 	}
1212 
1213 	return nr_disp;
1214 }
1215 
1216 static bool throtl_can_upgrade(struct throtl_data *td,
1217 	struct throtl_grp *this_tg);
1218 /**
1219  * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1220  * @t: the pending_timer member of the throtl_service_queue being serviced
1221  *
1222  * This timer is armed when a child throtl_grp with active bios becomes
1223  * pending and queued on the service_queue's pending_tree and expires when
1224  * the first child throtl_grp should be dispatched.  This function
1225  * dispatches bio's from the children throtl_grps to the parent
1226  * service_queue.
1227  *
1228  * If the parent's parent is another throtl_grp, dispatching is propagated
1229  * by either arming its pending_timer or repeating dispatch directly.  If
1230  * the top-level service_tree is reached, throtl_data->dispatch_work is
1231  * kicked so that the ready bio's are issued.
1232  */
1233 static void throtl_pending_timer_fn(struct timer_list *t)
1234 {
1235 	struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
1236 	struct throtl_grp *tg = sq_to_tg(sq);
1237 	struct throtl_data *td = sq_to_td(sq);
1238 	struct request_queue *q = td->queue;
1239 	struct throtl_service_queue *parent_sq;
1240 	bool dispatched;
1241 	int ret;
1242 
1243 	spin_lock_irq(&q->queue_lock);
1244 	if (throtl_can_upgrade(td, NULL))
1245 		throtl_upgrade_state(td);
1246 
1247 again:
1248 	parent_sq = sq->parent_sq;
1249 	dispatched = false;
1250 
1251 	while (true) {
1252 		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1253 			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
1254 			   sq->nr_queued[READ], sq->nr_queued[WRITE]);
1255 
1256 		ret = throtl_select_dispatch(sq);
1257 		if (ret) {
1258 			throtl_log(sq, "bios disp=%u", ret);
1259 			dispatched = true;
1260 		}
1261 
1262 		if (throtl_schedule_next_dispatch(sq, false))
1263 			break;
1264 
1265 		/* this dispatch window is still open, relax and repeat */
1266 		spin_unlock_irq(&q->queue_lock);
1267 		cpu_relax();
1268 		spin_lock_irq(&q->queue_lock);
1269 	}
1270 
1271 	if (!dispatched)
1272 		goto out_unlock;
1273 
1274 	if (parent_sq) {
1275 		/* @parent_sq is another throtl_grp, propagate dispatch */
1276 		if (tg->flags & THROTL_TG_WAS_EMPTY) {
1277 			tg_update_disptime(tg);
1278 			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1279 				/* window is already open, repeat dispatching */
1280 				sq = parent_sq;
1281 				tg = sq_to_tg(sq);
1282 				goto again;
1283 			}
1284 		}
1285 	} else {
1286 		/* reached the top-level, queue issuing */
1287 		queue_work(kthrotld_workqueue, &td->dispatch_work);
1288 	}
1289 out_unlock:
1290 	spin_unlock_irq(&q->queue_lock);
1291 }
1292 
1293 /**
1294  * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1295  * @work: work item being executed
1296  *
1297  * This function is queued for execution when bios reach the bio_lists[]
1298  * of throtl_data->service_queue.  Those bios are ready and issued by this
1299  * function.
1300  */
1301 static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1302 {
1303 	struct throtl_data *td = container_of(work, struct throtl_data,
1304 					      dispatch_work);
1305 	struct throtl_service_queue *td_sq = &td->service_queue;
1306 	struct request_queue *q = td->queue;
1307 	struct bio_list bio_list_on_stack;
1308 	struct bio *bio;
1309 	struct blk_plug plug;
1310 	int rw;
1311 
1312 	bio_list_init(&bio_list_on_stack);
1313 
1314 	spin_lock_irq(&q->queue_lock);
1315 	for (rw = READ; rw <= WRITE; rw++)
1316 		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1317 			bio_list_add(&bio_list_on_stack, bio);
1318 	spin_unlock_irq(&q->queue_lock);
1319 
1320 	if (!bio_list_empty(&bio_list_on_stack)) {
1321 		blk_start_plug(&plug);
1322 		while ((bio = bio_list_pop(&bio_list_on_stack)))
1323 			generic_make_request(bio);
1324 		blk_finish_plug(&plug);
1325 	}
1326 }
1327 
1328 static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1329 			      int off)
1330 {
1331 	struct throtl_grp *tg = pd_to_tg(pd);
1332 	u64 v = *(u64 *)((void *)tg + off);
1333 
1334 	if (v == U64_MAX)
1335 		return 0;
1336 	return __blkg_prfill_u64(sf, pd, v);
1337 }
1338 
1339 static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1340 			       int off)
1341 {
1342 	struct throtl_grp *tg = pd_to_tg(pd);
1343 	unsigned int v = *(unsigned int *)((void *)tg + off);
1344 
1345 	if (v == UINT_MAX)
1346 		return 0;
1347 	return __blkg_prfill_u64(sf, pd, v);
1348 }
1349 
1350 static int tg_print_conf_u64(struct seq_file *sf, void *v)
1351 {
1352 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1353 			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1354 	return 0;
1355 }
1356 
1357 static int tg_print_conf_uint(struct seq_file *sf, void *v)
1358 {
1359 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1360 			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1361 	return 0;
1362 }
1363 
1364 static void tg_conf_updated(struct throtl_grp *tg, bool global)
1365 {
1366 	struct throtl_service_queue *sq = &tg->service_queue;
1367 	struct cgroup_subsys_state *pos_css;
1368 	struct blkcg_gq *blkg;
1369 
1370 	throtl_log(&tg->service_queue,
1371 		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1372 		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1373 		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1374 
1375 	/*
1376 	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
1377 	 * considered to have rules if either the tg itself or any of its
1378 	 * ancestors has rules.  This identifies groups without any
1379 	 * restrictions in the whole hierarchy and allows them to bypass
1380 	 * blk-throttle.
1381 	 */
1382 	blkg_for_each_descendant_pre(blkg, pos_css,
1383 			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1384 		struct throtl_grp *this_tg = blkg_to_tg(blkg);
1385 		struct throtl_grp *parent_tg;
1386 
1387 		tg_update_has_rules(this_tg);
1388 		/* ignore root/second level */
1389 		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1390 		    !blkg->parent->parent)
1391 			continue;
1392 		parent_tg = blkg_to_tg(blkg->parent);
1393 		/*
1394 		 * make sure all children have a lower idle time threshold and
1395 		 * a higher latency target
1396 		 */
1397 		this_tg->idletime_threshold = min(this_tg->idletime_threshold,
1398 				parent_tg->idletime_threshold);
1399 		this_tg->latency_target = max(this_tg->latency_target,
1400 				parent_tg->latency_target);
1401 	}
1402 
1403 	/*
1404 	 * We're already holding queue_lock and know @tg is valid.  Let's
1405 	 * apply the new config directly.
1406 	 *
1407 	 * Restart the slices for both READ and WRITE. It might happen
1408 	 * that a group's limits are dropped suddenly and we don't want to
1409 	 * account recently dispatched IO against the new low rate.
1410 	 */
1411 	throtl_start_new_slice(tg, 0);
1412 	throtl_start_new_slice(tg, 1);
1413 
1414 	if (tg->flags & THROTL_TG_PENDING) {
1415 		tg_update_disptime(tg);
1416 		throtl_schedule_next_dispatch(sq->parent_sq, true);
1417 	}
1418 }
1419 
1420 static ssize_t tg_set_conf(struct kernfs_open_file *of,
1421 			   char *buf, size_t nbytes, loff_t off, bool is_u64)
1422 {
1423 	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1424 	struct blkg_conf_ctx ctx;
1425 	struct throtl_grp *tg;
1426 	int ret;
1427 	u64 v;
1428 
1429 	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1430 	if (ret)
1431 		return ret;
1432 
1433 	ret = -EINVAL;
1434 	if (sscanf(ctx.body, "%llu", &v) != 1)
1435 		goto out_finish;
1436 	if (!v)
1437 		v = U64_MAX;
1438 
1439 	tg = blkg_to_tg(ctx.blkg);
1440 
1441 	if (is_u64)
1442 		*(u64 *)((void *)tg + of_cft(of)->private) = v;
1443 	else
1444 		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1445 
1446 	tg_conf_updated(tg, false);
1447 	ret = 0;
1448 out_finish:
1449 	blkg_conf_finish(&ctx);
1450 	return ret ?: nbytes;
1451 }
1452 
1453 static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1454 			       char *buf, size_t nbytes, loff_t off)
1455 {
1456 	return tg_set_conf(of, buf, nbytes, off, true);
1457 }
1458 
1459 static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1460 				char *buf, size_t nbytes, loff_t off)
1461 {
1462 	return tg_set_conf(of, buf, nbytes, off, false);
1463 }
1464 
1465 static struct cftype throtl_legacy_files[] = {
1466 	{
1467 		.name = "throttle.read_bps_device",
1468 		.private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
1469 		.seq_show = tg_print_conf_u64,
1470 		.write = tg_set_conf_u64,
1471 	},
1472 	{
1473 		.name = "throttle.write_bps_device",
1474 		.private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1475 		.seq_show = tg_print_conf_u64,
1476 		.write = tg_set_conf_u64,
1477 	},
1478 	{
1479 		.name = "throttle.read_iops_device",
1480 		.private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
1481 		.seq_show = tg_print_conf_uint,
1482 		.write = tg_set_conf_uint,
1483 	},
1484 	{
1485 		.name = "throttle.write_iops_device",
1486 		.private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1487 		.seq_show = tg_print_conf_uint,
1488 		.write = tg_set_conf_uint,
1489 	},
1490 	{
1491 		.name = "throttle.io_service_bytes",
1492 		.private = (unsigned long)&blkcg_policy_throtl,
1493 		.seq_show = blkg_print_stat_bytes,
1494 	},
1495 	{
1496 		.name = "throttle.io_service_bytes_recursive",
1497 		.private = (unsigned long)&blkcg_policy_throtl,
1498 		.seq_show = blkg_print_stat_bytes_recursive,
1499 	},
1500 	{
1501 		.name = "throttle.io_serviced",
1502 		.private = (unsigned long)&blkcg_policy_throtl,
1503 		.seq_show = blkg_print_stat_ios,
1504 	},
1505 	{
1506 		.name = "throttle.io_serviced_recursive",
1507 		.private = (unsigned long)&blkcg_policy_throtl,
1508 		.seq_show = blkg_print_stat_ios_recursive,
1509 	},
1510 	{ }	/* terminate */
1511 };
1512 
1513 static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1514 			 int off)
1515 {
1516 	struct throtl_grp *tg = pd_to_tg(pd);
1517 	const char *dname = blkg_dev_name(pd->blkg);
1518 	char bufs[4][21] = { "max", "max", "max", "max" };
1519 	u64 bps_dft;
1520 	unsigned int iops_dft;
1521 	char idle_time[26] = "";
1522 	char latency_time[26] = "";
1523 
1524 	if (!dname)
1525 		return 0;
1526 
1527 	if (off == LIMIT_LOW) {
1528 		bps_dft = 0;
1529 		iops_dft = 0;
1530 	} else {
1531 		bps_dft = U64_MAX;
1532 		iops_dft = UINT_MAX;
1533 	}
1534 
1535 	if (tg->bps_conf[READ][off] == bps_dft &&
1536 	    tg->bps_conf[WRITE][off] == bps_dft &&
1537 	    tg->iops_conf[READ][off] == iops_dft &&
1538 	    tg->iops_conf[WRITE][off] == iops_dft &&
1539 	    (off != LIMIT_LOW ||
1540 	     (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
1541 	      tg->latency_target_conf == DFL_LATENCY_TARGET)))
1542 		return 0;
1543 
1544 	if (tg->bps_conf[READ][off] != U64_MAX)
1545 		snprintf(bufs[0], sizeof(bufs[0]), "%llu",
1546 			tg->bps_conf[READ][off]);
1547 	if (tg->bps_conf[WRITE][off] != U64_MAX)
1548 		snprintf(bufs[1], sizeof(bufs[1]), "%llu",
1549 			tg->bps_conf[WRITE][off]);
1550 	if (tg->iops_conf[READ][off] != UINT_MAX)
1551 		snprintf(bufs[2], sizeof(bufs[2]), "%u",
1552 			tg->iops_conf[READ][off]);
1553 	if (tg->iops_conf[WRITE][off] != UINT_MAX)
1554 		snprintf(bufs[3], sizeof(bufs[3]), "%u",
1555 			tg->iops_conf[WRITE][off]);
1556 	if (off == LIMIT_LOW) {
1557 		if (tg->idletime_threshold_conf == ULONG_MAX)
1558 			strcpy(idle_time, " idle=max");
1559 		else
1560 			snprintf(idle_time, sizeof(idle_time), " idle=%lu",
1561 				tg->idletime_threshold_conf);
1562 
1563 		if (tg->latency_target_conf == ULONG_MAX)
1564 			strcpy(latency_time, " latency=max");
1565 		else
1566 			snprintf(latency_time, sizeof(latency_time),
1567 				" latency=%lu", tg->latency_target_conf);
1568 	}
1569 
1570 	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
1571 		   dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
1572 		   latency_time);
1573 	return 0;
1574 }
1575 
1576 static int tg_print_limit(struct seq_file *sf, void *v)
1577 {
1578 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
1579 			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1580 	return 0;
1581 }
1582 
1583 static ssize_t tg_set_limit(struct kernfs_open_file *of,
1584 			  char *buf, size_t nbytes, loff_t off)
1585 {
1586 	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1587 	struct blkg_conf_ctx ctx;
1588 	struct throtl_grp *tg;
1589 	u64 v[4];
1590 	unsigned long idle_time;
1591 	unsigned long latency_time;
1592 	int ret;
1593 	int index = of_cft(of)->private;
1594 
1595 	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1596 	if (ret)
1597 		return ret;
1598 
1599 	tg = blkg_to_tg(ctx.blkg);
1600 
1601 	v[0] = tg->bps_conf[READ][index];
1602 	v[1] = tg->bps_conf[WRITE][index];
1603 	v[2] = tg->iops_conf[READ][index];
1604 	v[3] = tg->iops_conf[WRITE][index];
1605 
1606 	idle_time = tg->idletime_threshold_conf;
1607 	latency_time = tg->latency_target_conf;
1608 	while (true) {
1609 		char tok[27];	/* wiops=18446744073709551615 */
1610 		char *p;
1611 		u64 val = U64_MAX;
1612 		int len;
1613 
1614 		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1615 			break;
1616 		if (tok[0] == '\0')
1617 			break;
1618 		ctx.body += len;
1619 
1620 		ret = -EINVAL;
1621 		p = tok;
1622 		strsep(&p, "=");
1623 		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1624 			goto out_finish;
1625 
1626 		ret = -ERANGE;
1627 		if (!val)
1628 			goto out_finish;
1629 
1630 		ret = -EINVAL;
1631 		if (!strcmp(tok, "rbps"))
1632 			v[0] = val;
1633 		else if (!strcmp(tok, "wbps"))
1634 			v[1] = val;
1635 		else if (!strcmp(tok, "riops"))
1636 			v[2] = min_t(u64, val, UINT_MAX);
1637 		else if (!strcmp(tok, "wiops"))
1638 			v[3] = min_t(u64, val, UINT_MAX);
1639 		else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
1640 			idle_time = val;
1641 		else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
1642 			latency_time = val;
1643 		else
1644 			goto out_finish;
1645 	}
1646 
1647 	tg->bps_conf[READ][index] = v[0];
1648 	tg->bps_conf[WRITE][index] = v[1];
1649 	tg->iops_conf[READ][index] = v[2];
1650 	tg->iops_conf[WRITE][index] = v[3];
1651 
1652 	if (index == LIMIT_MAX) {
1653 		tg->bps[READ][index] = v[0];
1654 		tg->bps[WRITE][index] = v[1];
1655 		tg->iops[READ][index] = v[2];
1656 		tg->iops[WRITE][index] = v[3];
1657 	}
1658 	tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1659 		tg->bps_conf[READ][LIMIT_MAX]);
1660 	tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1661 		tg->bps_conf[WRITE][LIMIT_MAX]);
1662 	tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1663 		tg->iops_conf[READ][LIMIT_MAX]);
1664 	tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1665 		tg->iops_conf[WRITE][LIMIT_MAX]);
1666 	tg->idletime_threshold_conf = idle_time;
1667 	tg->latency_target_conf = latency_time;
1668 
1669 	/* force user to configure all settings for low limit  */
1670 	if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1671 	      tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1672 	    tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1673 	    tg->latency_target_conf == DFL_LATENCY_TARGET) {
1674 		tg->bps[READ][LIMIT_LOW] = 0;
1675 		tg->bps[WRITE][LIMIT_LOW] = 0;
1676 		tg->iops[READ][LIMIT_LOW] = 0;
1677 		tg->iops[WRITE][LIMIT_LOW] = 0;
1678 		tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1679 		tg->latency_target = DFL_LATENCY_TARGET;
1680 	} else if (index == LIMIT_LOW) {
1681 		tg->idletime_threshold = tg->idletime_threshold_conf;
1682 		tg->latency_target = tg->latency_target_conf;
1683 	}
1684 
1685 	blk_throtl_update_limit_valid(tg->td);
1686 	if (tg->td->limit_valid[LIMIT_LOW]) {
1687 		if (index == LIMIT_LOW)
1688 			tg->td->limit_index = LIMIT_LOW;
1689 	} else
1690 		tg->td->limit_index = LIMIT_MAX;
1691 	tg_conf_updated(tg, index == LIMIT_LOW &&
1692 		tg->td->limit_valid[LIMIT_LOW]);
1693 	ret = 0;
1694 out_finish:
1695 	blkg_conf_finish(&ctx);
1696 	return ret ?: nbytes;
1697 }
1698 
1699 static struct cftype throtl_files[] = {
1700 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1701 	{
1702 		.name = "low",
1703 		.flags = CFTYPE_NOT_ON_ROOT,
1704 		.seq_show = tg_print_limit,
1705 		.write = tg_set_limit,
1706 		.private = LIMIT_LOW,
1707 	},
1708 #endif
1709 	{
1710 		.name = "max",
1711 		.flags = CFTYPE_NOT_ON_ROOT,
1712 		.seq_show = tg_print_limit,
1713 		.write = tg_set_limit,
1714 		.private = LIMIT_MAX,
1715 	},
1716 	{ }	/* terminate */
1717 };
1718 
1719 static void throtl_shutdown_wq(struct request_queue *q)
1720 {
1721 	struct throtl_data *td = q->td;
1722 
1723 	cancel_work_sync(&td->dispatch_work);
1724 }
1725 
1726 static struct blkcg_policy blkcg_policy_throtl = {
1727 	.dfl_cftypes		= throtl_files,
1728 	.legacy_cftypes		= throtl_legacy_files,
1729 
1730 	.pd_alloc_fn		= throtl_pd_alloc,
1731 	.pd_init_fn		= throtl_pd_init,
1732 	.pd_online_fn		= throtl_pd_online,
1733 	.pd_offline_fn		= throtl_pd_offline,
1734 	.pd_free_fn		= throtl_pd_free,
1735 };
1736 
1737 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1738 {
1739 	unsigned long rtime = jiffies, wtime = jiffies;
1740 
1741 	if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1742 		rtime = tg->last_low_overflow_time[READ];
1743 	if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1744 		wtime = tg->last_low_overflow_time[WRITE];
1745 	return min(rtime, wtime);
1746 }
1747 
1748 /* tg should not be an intermediate node */
1749 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1750 {
1751 	struct throtl_service_queue *parent_sq;
1752 	struct throtl_grp *parent = tg;
1753 	unsigned long ret = __tg_last_low_overflow_time(tg);
1754 
1755 	while (true) {
1756 		parent_sq = parent->service_queue.parent_sq;
1757 		parent = sq_to_tg(parent_sq);
1758 		if (!parent)
1759 			break;
1760 
1761 		/*
1762 		 * The parent has no low limit, so it is considered to always
1763 		 * reach it; its overflow time is useless for the children.
1764 		 */
1765 		if (!parent->bps[READ][LIMIT_LOW] &&
1766 		    !parent->iops[READ][LIMIT_LOW] &&
1767 		    !parent->bps[WRITE][LIMIT_LOW] &&
1768 		    !parent->iops[WRITE][LIMIT_LOW])
1769 			continue;
1770 		if (time_after(__tg_last_low_overflow_time(parent), ret))
1771 			ret = __tg_last_low_overflow_time(parent);
1772 	}
1773 	return ret;
1774 }
1775 
1776 static bool throtl_tg_is_idle(struct throtl_grp *tg)
1777 {
1778 	/*
1779 	 * cgroup is idle if:
1780 	 * - single idle is too long, longer than a fixed value (in case the
1781 	 *   user configures too big a threshold) or 4 times the idletime threshold
1782 	 * - average think time is more than threshold
1783 	 * - IO latency is largely below threshold
1784 	 */
1785 	unsigned long time;
1786 	bool ret;
1787 
1788 	time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1789 	ret = tg->latency_target == DFL_LATENCY_TARGET ||
1790 	      tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1791 	      (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1792 	      tg->avg_idletime > tg->idletime_threshold ||
1793 	      (tg->latency_target && tg->bio_cnt &&
1794 		tg->bad_bio_cnt * 5 < tg->bio_cnt);
1795 	throtl_log(&tg->service_queue,
1796 		"avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
1797 		tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1798 		tg->bio_cnt, ret, tg->td->scale);
1799 	return ret;
1800 }
1801 
1802 static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1803 {
1804 	struct throtl_service_queue *sq = &tg->service_queue;
1805 	bool read_limit, write_limit;
1806 
1807 	/*
1808 	 * If the cgroup reaches its low limit (a low limit of 0 is always
1809 	 * considered reached), it's ok to upgrade to the next limit.
1810 	 */
1811 	read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1812 	write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1813 	if (!read_limit && !write_limit)
1814 		return true;
1815 	if (read_limit && sq->nr_queued[READ] &&
1816 	    (!write_limit || sq->nr_queued[WRITE]))
1817 		return true;
1818 	if (write_limit && sq->nr_queued[WRITE] &&
1819 	    (!read_limit || sq->nr_queued[READ]))
1820 		return true;
1821 
1822 	if (time_after_eq(jiffies,
1823 		tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1824 	    throtl_tg_is_idle(tg))
1825 		return true;
1826 	return false;
1827 }
1828 
1829 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1830 {
1831 	while (true) {
1832 		if (throtl_tg_can_upgrade(tg))
1833 			return true;
1834 		tg = sq_to_tg(tg->service_queue.parent_sq);
1835 		if (!tg || !tg_to_blkg(tg)->parent)
1836 			return false;
1837 	}
1838 	return false;
1839 }
1840 
1841 static bool throtl_can_upgrade(struct throtl_data *td,
1842 	struct throtl_grp *this_tg)
1843 {
1844 	struct cgroup_subsys_state *pos_css;
1845 	struct blkcg_gq *blkg;
1846 
1847 	if (td->limit_index != LIMIT_LOW)
1848 		return false;
1849 
1850 	if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
1851 		return false;
1852 
1853 	rcu_read_lock();
1854 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1855 		struct throtl_grp *tg = blkg_to_tg(blkg);
1856 
1857 		if (tg == this_tg)
1858 			continue;
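		/* skip intermediate cgroups; their leaf descendants are checked instead */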
1859 		if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1860 			continue;
1861 		if (!throtl_hierarchy_can_upgrade(tg)) {
1862 			rcu_read_unlock();
1863 			return false;
1864 		}
1865 	}
1866 	rcu_read_unlock();
1867 	return true;
1868 }
1869 
1870 static void throtl_upgrade_check(struct throtl_grp *tg)
1871 {
1872 	unsigned long now = jiffies;
1873 
1874 	if (tg->td->limit_index != LIMIT_LOW)
1875 		return;
1876 
1877 	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1878 		return;
1879 
1880 	tg->last_check_time = now;
1881 
1882 	if (!time_after_eq(now,
1883 	     __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1884 		return;
1885 
1886 	if (throtl_can_upgrade(tg->td, NULL))
1887 		throtl_upgrade_state(tg->td);
1888 }
1889 
1890 static void throtl_upgrade_state(struct throtl_data *td)
1891 {
1892 	struct cgroup_subsys_state *pos_css;
1893 	struct blkcg_gq *blkg;
1894 
1895 	throtl_log(&td->service_queue, "upgrade to max");
1896 	td->limit_index = LIMIT_MAX;
1897 	td->low_upgrade_time = jiffies;
1898 	td->scale = 0;
1899 	rcu_read_lock();
1900 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1901 		struct throtl_grp *tg = blkg_to_tg(blkg);
1902 		struct throtl_service_queue *sq = &tg->service_queue;
1903 
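		/* make the group immediately eligible and flush its queued bios */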
1904 		tg->disptime = jiffies - 1;
1905 		throtl_select_dispatch(sq);
1906 		throtl_schedule_next_dispatch(sq, true);
1907 	}
1908 	rcu_read_unlock();
1909 	throtl_select_dispatch(&td->service_queue);
1910 	throtl_schedule_next_dispatch(&td->service_queue, true);
1911 	queue_work(kthrotld_workqueue, &td->dispatch_work);
1912 }
1913 
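/*
 * Downgrade bookkeeping, illustrated with hypothetical numbers: if the
 * effective limits had been scaled up 4 steps since the last upgrade
 * (scale == 4), a downgrade first halves the scale to 2 and backdates
 * low_upgrade_time to 2 * throtl_slice ago, so the boosted limits
 * (applied elsewhere based on td->scale) shrink step by step instead of
 * snapping straight back to LIMIT_LOW.
 */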
1914 static void throtl_downgrade_state(struct throtl_data *td, int new)
1915 {
1916 	td->scale /= 2;
1917 
1918 	throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
1919 	if (td->scale) {
1920 		td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
1921 		return;
1922 	}
1923 
1924 	td->limit_index = new;
1925 	td->low_downgrade_time = jiffies;
1926 }
1927 
1928 static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
1929 {
1930 	struct throtl_data *td = tg->td;
1931 	unsigned long now = jiffies;
1932 
1933 	/*
1934 	 * If the cgroup is below its low limit, consider a downgrade so that
1935 	 * the other cgroups get throttled.
1936 	 */
1937 	if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
1938 	    time_after_eq(now, tg_last_low_overflow_time(tg) +
1939 					td->throtl_slice) &&
1940 	    (!throtl_tg_is_idle(tg) ||
1941 	     !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
1942 		return true;
1943 	return false;
1944 }
1945 
1946 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
1947 {
1948 	while (true) {
1949 		if (!throtl_tg_can_downgrade(tg))
1950 			return false;
1951 		tg = sq_to_tg(tg->service_queue.parent_sq);
1952 		if (!tg || !tg_to_blkg(tg)->parent)
1953 			break;
1954 	}
1955 	return true;
1956 }
1957 
1958 static void throtl_downgrade_check(struct throtl_grp *tg)
1959 {
1960 	uint64_t bps;
1961 	unsigned int iops;
1962 	unsigned long elapsed_time;
1963 	unsigned long now = jiffies;
1964 
1965 	if (tg->td->limit_index != LIMIT_MAX ||
1966 	    !tg->td->limit_valid[LIMIT_LOW])
1967 		return;
1968 	if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1969 		return;
1970 	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1971 		return;
1972 
1973 	elapsed_time = now - tg->last_check_time;
1974 	tg->last_check_time = now;
1975 
1976 	if (time_before(now, tg_last_low_overflow_time(tg) +
1977 			tg->td->throtl_slice))
1978 		return;
1979 
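	/*
	 * Estimate the rate over the elapsed window.  E.g., assuming HZ=250,
	 * elapsed_time of 25 jiffies (100ms) and 1MiB read since the last
	 * check: bps = 1MiB * 250 / 25 = 10MiB/s.  Reaching the low limit
	 * refreshes last_low_overflow_time, which blocks a downgrade for
	 * another throtl_slice.
	 */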
1980 	if (tg->bps[READ][LIMIT_LOW]) {
1981 		bps = tg->last_bytes_disp[READ] * HZ;
1982 		do_div(bps, elapsed_time);
1983 		if (bps >= tg->bps[READ][LIMIT_LOW])
1984 			tg->last_low_overflow_time[READ] = now;
1985 	}
1986 
1987 	if (tg->bps[WRITE][LIMIT_LOW]) {
1988 		bps = tg->last_bytes_disp[WRITE] * HZ;
1989 		do_div(bps, elapsed_time);
1990 		if (bps >= tg->bps[WRITE][LIMIT_LOW])
1991 			tg->last_low_overflow_time[WRITE] = now;
1992 	}
1993 
1994 	if (tg->iops[READ][LIMIT_LOW]) {
1995 		iops = tg->last_io_disp[READ] * HZ / elapsed_time;
1996 		if (iops >= tg->iops[READ][LIMIT_LOW])
1997 			tg->last_low_overflow_time[READ] = now;
1998 	}
1999 
2000 	if (tg->iops[WRITE][LIMIT_LOW]) {
2001 		iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
2002 		if (iops >= tg->iops[WRITE][LIMIT_LOW])
2003 			tg->last_low_overflow_time[WRITE] = now;
2004 	}
2005 
2006 	/*
2007 	 * If the cgroup is below its low limit, consider a downgrade so that
2008 	 * the other cgroups get throttled.
2009 	 */
2010 	if (throtl_hierarchy_can_downgrade(tg))
2011 		throtl_downgrade_state(tg->td, LIMIT_LOW);
2012 
2013 	tg->last_bytes_disp[READ] = 0;
2014 	tg->last_bytes_disp[WRITE] = 0;
2015 	tg->last_io_disp[READ] = 0;
2016 	tg->last_io_disp[WRITE] = 0;
2017 }
2018 
2019 static void blk_throtl_update_idletime(struct throtl_grp *tg)
2020 {
2021 	unsigned long now = ktime_get_ns() >> 10;
2022 	unsigned long last_finish_time = tg->last_finish_time;
2023 
2024 	if (now <= last_finish_time || last_finish_time == 0 ||
2025 	    last_finish_time == tg->checked_last_finish_time)
2026 		return;
2027 
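	/*
	 * Exponentially weighted moving average with 7/8 weight on history:
	 * avg = (7 * avg + sample) / 8.  E.g. an 800us average and a new
	 * 1600us idle gap give (7 * 800 + 1600) / 8 = 900us.
	 */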
2028 	tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
2029 	tg->checked_last_finish_time = last_finish_time;
2030 }
2031 
2032 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2033 static void throtl_update_latency_buckets(struct throtl_data *td)
2034 {
2035 	struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
2036 	int i, cpu, rw;
2037 	unsigned long last_latency[2] = { 0 };
2038 	unsigned long latency[2];
2039 
2040 	if (!blk_queue_nonrot(td->queue))
2041 		return;
2042 	if (time_before(jiffies, td->last_calculate_time + HZ))
2043 		return;
2044 	td->last_calculate_time = jiffies;
2045 
2046 	memset(avg_latency, 0, sizeof(avg_latency));
2047 	for (rw = READ; rw <= WRITE; rw++) {
2048 		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2049 			struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
2050 
2051 			for_each_possible_cpu(cpu) {
2052 				struct latency_bucket *bucket;
2053 
2054 				/* this isn't race free, but ok in practice */
2055 				bucket = per_cpu_ptr(td->latency_buckets[rw],
2056 					cpu);
2057 				tmp->total_latency += bucket[i].total_latency;
2058 				tmp->samples += bucket[i].samples;
2059 				bucket[i].total_latency = 0;
2060 				bucket[i].samples = 0;
2061 			}
2062 
2063 			if (tmp->samples >= 32) {
2064 				int samples = tmp->samples;
2065 
2066 				latency[rw] = tmp->total_latency;
2067 
2068 				tmp->total_latency = 0;
2069 				tmp->samples = 0;
2070 				latency[rw] /= samples;
2071 				if (latency[rw] == 0)
2072 					continue;
2073 				avg_latency[rw][i].latency = latency[rw];
2074 			}
2075 		}
2076 	}
2077 
2078 	for (rw = READ; rw <= WRITE; rw++) {
2079 		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2080 			if (!avg_latency[rw][i].latency) {
2081 				if (td->avg_buckets[rw][i].latency < last_latency[rw])
2082 					td->avg_buckets[rw][i].latency =
2083 						last_latency[rw];
2084 				continue;
2085 			}
2086 
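			/*
			 * Smooth the bucket latency with the same 7/8
			 * weighting as the idle time, and keep it monotonic
			 * across bucket sizes via last_latency so a larger
			 * IO bucket never reports a smaller baseline.
			 */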
2087 			if (!td->avg_buckets[rw][i].valid)
2088 				latency[rw] = avg_latency[rw][i].latency;
2089 			else
2090 				latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
2091 					avg_latency[rw][i].latency) >> 3;
2092 
2093 			td->avg_buckets[rw][i].latency = max(latency[rw],
2094 				last_latency[rw]);
2095 			td->avg_buckets[rw][i].valid = true;
2096 			last_latency[rw] = td->avg_buckets[rw][i].latency;
2097 		}
2098 	}
2099 
2100 	for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2101 		throtl_log(&td->service_queue,
2102 			"Latency bucket %d: read latency=%ld, read valid=%d, "
2103 			"write latency=%ld, write valid=%d", i,
2104 			td->avg_buckets[READ][i].latency,
2105 			td->avg_buckets[READ][i].valid,
2106 			td->avg_buckets[WRITE][i].latency,
2107 			td->avg_buckets[WRITE][i].valid);
2108 }
2109 #else
2110 static inline void throtl_update_latency_buckets(struct throtl_data *td)
2111 {
2112 }
2113 #endif
2114 
2115 bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
2116 		    struct bio *bio)
2117 {
2118 	struct throtl_qnode *qn = NULL;
2119 	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
2120 	struct throtl_service_queue *sq;
2121 	bool rw = bio_data_dir(bio);
2122 	bool throttled = false;
2123 	struct throtl_data *td = tg->td;
2124 
2125 	WARN_ON_ONCE(!rcu_read_lock_held());
2126 
2127 	/* see throtl_charge_bio() */
2128 	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
2129 		goto out;
2130 
2131 	spin_lock_irq(&q->queue_lock);
2132 
2133 	throtl_update_latency_buckets(td);
2134 
2135 	blk_throtl_update_idletime(tg);
2136 
2137 	sq = &tg->service_queue;
2138 
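	/*
	 * Walk up from the bio's own group towards the root.  At each level
	 * the bio is either charged and passed through (within limits) or
	 * queued (over the limit, or behind bios that are already queued);
	 * reaching the root without queueing means it is issued directly.
	 */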
2139 again:
2140 	while (true) {
2141 		if (tg->last_low_overflow_time[rw] == 0)
2142 			tg->last_low_overflow_time[rw] = jiffies;
2143 		throtl_downgrade_check(tg);
2144 		throtl_upgrade_check(tg);
2145 		/* throtl is FIFO - if bios are already queued, this bio must queue too */
2146 		if (sq->nr_queued[rw])
2147 			break;
2148 
2149 		/* if above limits, break to queue */
2150 		if (!tg_may_dispatch(tg, bio, NULL)) {
2151 			tg->last_low_overflow_time[rw] = jiffies;
2152 			if (throtl_can_upgrade(td, tg)) {
2153 				throtl_upgrade_state(td);
2154 				goto again;
2155 			}
2156 			break;
2157 		}
2158 
2159 		/* within limits, let's charge and dispatch directly */
2160 		throtl_charge_bio(tg, bio);
2161 
2162 		/*
2163 		 * We need to trim slice even when bios are not being queued
2164 		 * otherwise it might happen that a bio is not queued for
2165 		 * a long time and slice keeps on extending and trim is not
2166 		 * called for a long time. Now if limits are reduced suddenly
2167 		 * we take into account all the IO dispatched so far at the new
2168 		 * low rate and newly queued IO gets a really long dispatch
2169 		 * time.
2170 		 *
2171 		 * So keep on trimming slice even if bio is not queued.
2172 		 */
2173 		throtl_trim_slice(tg, rw);
2174 
2175 		/*
2176 		 * @bio passed through this layer without being throttled.
2177 		 * Climb up the ladder.  If we're already at the top, it
2178 		 * can be executed directly.
2179 		 */
2180 		qn = &tg->qnode_on_parent[rw];
2181 		sq = sq->parent_sq;
2182 		tg = sq_to_tg(sq);
2183 		if (!tg)
2184 			goto out_unlock;
2185 	}
2186 
2187 	/* out-of-limit, queue to @tg */
2188 	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2189 		   rw == READ ? 'R' : 'W',
2190 		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
2191 		   tg_bps_limit(tg, rw),
2192 		   tg->io_disp[rw], tg_iops_limit(tg, rw),
2193 		   sq->nr_queued[READ], sq->nr_queued[WRITE]);
2194 
2195 	tg->last_low_overflow_time[rw] = jiffies;
2196 
2197 	td->nr_queued[rw]++;
2198 	throtl_add_bio_tg(bio, qn, tg);
2199 	throttled = true;
2200 
2201 	/*
2202 	 * Update @tg's dispatch time and force schedule dispatch if @tg
2203 	 * was empty before @bio.  The forced scheduling isn't likely to
2204 	 * cause undue delay as @bio is likely to be dispatched directly if
2205 	 * its @tg's disptime is not in the future.
2206 	 */
2207 	if (tg->flags & THROTL_TG_WAS_EMPTY) {
2208 		tg_update_disptime(tg);
2209 		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
2210 	}
2211 
2212 out_unlock:
2213 	spin_unlock_irq(&q->queue_lock);
2214 out:
2215 	bio_set_flag(bio, BIO_THROTTLED);
2216 
2217 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2218 	if (throttled || !td->track_bio_latency)
2219 		bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
2220 #endif
2221 	return throttled;
2222 }
2223 
2224 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2225 static void throtl_track_latency(struct throtl_data *td, sector_t size,
2226 	int op, unsigned long time)
2227 {
2228 	struct latency_bucket *latency;
2229 	int index;
2230 
2231 	if (!td || td->limit_index != LIMIT_LOW ||
2232 	    !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
2233 	    !blk_queue_nonrot(td->queue))
2234 		return;
2235 
2236 	index = request_bucket_index(size);
2237 
2238 	latency = get_cpu_ptr(td->latency_buckets[op]);
2239 	latency[index].total_latency += time;
2240 	latency[index].samples++;
2241 	put_cpu_ptr(td->latency_buckets[op]);
2242 }
2243 
2244 void blk_throtl_stat_add(struct request *rq, u64 time_ns)
2245 {
2246 	struct request_queue *q = rq->q;
2247 	struct throtl_data *td = q->td;
2248 
2249 	throtl_track_latency(td, rq->throtl_size, req_op(rq), time_ns >> 10);
2250 }
2251 
2252 void blk_throtl_bio_endio(struct bio *bio)
2253 {
2254 	struct blkcg_gq *blkg;
2255 	struct throtl_grp *tg;
2256 	u64 finish_time_ns;
2257 	unsigned long finish_time;
2258 	unsigned long start_time;
2259 	unsigned long lat;
2260 	int rw = bio_data_dir(bio);
2261 
2262 	blkg = bio->bi_blkg;
2263 	if (!blkg)
2264 		return;
2265 	tg = blkg_to_tg(blkg);
2266 
2267 	finish_time_ns = ktime_get_ns();
2268 	tg->last_finish_time = finish_time_ns >> 10;
2269 
2270 	start_time = bio_issue_time(&bio->bi_issue) >> 10;
2271 	finish_time = __bio_issue_time(finish_time_ns) >> 10;
2272 	if (!start_time || finish_time <= start_time)
2273 		return;
2274 
2275 	lat = finish_time - start_time;
2276 	/* this is only for bio-based drivers */
2277 	if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
2278 		throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
2279 				     bio_op(bio), lat);
2280 
2281 	if (tg->latency_target && lat >= tg->td->filtered_latency) {
2282 		int bucket;
2283 		unsigned int threshold;
2284 
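		/*
		 * A bio counts as "bad" when its latency exceeds the smoothed
		 * baseline for its size bucket plus the group's latency
		 * target, e.g. a 2ms baseline and a 1ms target flag anything
		 * slower than 3ms.
		 */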
2285 		bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
2286 		threshold = tg->td->avg_buckets[rw][bucket].latency +
2287 			tg->latency_target;
2288 		if (lat > threshold)
2289 			tg->bad_bio_cnt++;
2290 		/*
2291 		 * Not race free, so the count can be wrong, which only means
2292 		 * cgroups may be throttled when they need not be.
2293 		 */
2294 		tg->bio_cnt++;
2295 	}
2296 
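	/*
	 * Age the counters roughly once per throtl_slice (or once bio_cnt
	 * passes 1024) so the bad/total ratio reflects recent behaviour
	 * rather than the group's whole lifetime.
	 */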
2297 	if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2298 		tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2299 		tg->bio_cnt /= 2;
2300 		tg->bad_bio_cnt /= 2;
2301 	}
2302 }
2303 #endif
2304 
2305 /*
2306  * Dispatch all bios from all children tg's queued on @parent_sq.  On
2307  * return, @parent_sq is guaranteed to not have any active children tg's
2308  * and all bios from previously active tg's are on @parent_sq->bio_lists[].
2309  */
2310 static void tg_drain_bios(struct throtl_service_queue *parent_sq)
2311 {
2312 	struct throtl_grp *tg;
2313 
2314 	while ((tg = throtl_rb_first(parent_sq))) {
2315 		struct throtl_service_queue *sq = &tg->service_queue;
2316 		struct bio *bio;
2317 
2318 		throtl_dequeue_tg(tg);
2319 
2320 		while ((bio = throtl_peek_queued(&sq->queued[READ])))
2321 			tg_dispatch_one_bio(tg, bio_data_dir(bio));
2322 		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
2323 			tg_dispatch_one_bio(tg, bio_data_dir(bio));
2324 	}
2325 }
2326 
2327 /**
2328  * blk_throtl_drain - drain throttled bios
2329  * @q: request_queue to drain throttled bios for
2330  *
2331  * Dispatch all currently throttled bios on @q through ->make_request_fn().
2332  */
2333 void blk_throtl_drain(struct request_queue *q)
2334 	__releases(&q->queue_lock) __acquires(&q->queue_lock)
2335 {
2336 	struct throtl_data *td = q->td;
2337 	struct blkcg_gq *blkg;
2338 	struct cgroup_subsys_state *pos_css;
2339 	struct bio *bio;
2340 	int rw;
2341 
2342 	rcu_read_lock();
2343 
2344 	/*
2345 	 * Drain each tg while doing post-order walk on the blkg tree, so
2346 	 * that all bios are propagated to td->service_queue.  It'd be
2347 	 * better to walk service_queue tree directly but blkg walk is
2348 	 * easier.
2349 	 */
2350 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
2351 		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
2352 
2353 	/* finally, transfer bios from top-level tg's into the td */
2354 	tg_drain_bios(&td->service_queue);
2355 
2356 	rcu_read_unlock();
2357 	spin_unlock_irq(&q->queue_lock);
2358 
2359 	/* all bios now should be in td->service_queue, issue them */
2360 	for (rw = READ; rw <= WRITE; rw++)
2361 		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
2362 						NULL)))
2363 			generic_make_request(bio);
2364 
2365 	spin_lock_irq(&q->queue_lock);
2366 }
2367 
2368 int blk_throtl_init(struct request_queue *q)
2369 {
2370 	struct throtl_data *td;
2371 	int ret;
2372 
2373 	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
2374 	if (!td)
2375 		return -ENOMEM;
2376 	td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
2377 		LATENCY_BUCKET_SIZE, __alignof__(u64));
2378 	if (!td->latency_buckets[READ]) {
2379 		kfree(td);
2380 		return -ENOMEM;
2381 	}
2382 	td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
2383 		LATENCY_BUCKET_SIZE, __alignof__(u64));
2384 	if (!td->latency_buckets[WRITE]) {
2385 		free_percpu(td->latency_buckets[READ]);
2386 		kfree(td);
2387 		return -ENOMEM;
2388 	}
2389 
2390 	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
2391 	throtl_service_queue_init(&td->service_queue);
2392 
2393 	q->td = td;
2394 	td->queue = q;
2395 
2396 	td->limit_valid[LIMIT_MAX] = true;
2397 	td->limit_index = LIMIT_MAX;
2398 	td->low_upgrade_time = jiffies;
2399 	td->low_downgrade_time = jiffies;
2400 
2401 	/* activate policy */
2402 	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
2403 	if (ret) {
2404 		free_percpu(td->latency_buckets[READ]);
2405 		free_percpu(td->latency_buckets[WRITE]);
2406 		kfree(td);
2407 	}
2408 	return ret;
2409 }
2410 
2411 void blk_throtl_exit(struct request_queue *q)
2412 {
2413 	BUG_ON(!q->td);
2414 	throtl_shutdown_wq(q);
2415 	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
2416 	free_percpu(q->td->latency_buckets[READ]);
2417 	free_percpu(q->td->latency_buckets[WRITE]);
2418 	kfree(q->td);
2419 }
2420 
2421 void blk_throtl_register_queue(struct request_queue *q)
2422 {
2423 	struct throtl_data *td;
2424 	int i;
2425 
2426 	td = q->td;
2427 	BUG_ON(!td);
2428 
2429 	if (blk_queue_nonrot(q)) {
2430 		td->throtl_slice = DFL_THROTL_SLICE_SSD;
2431 		td->filtered_latency = LATENCY_FILTERED_SSD;
2432 	} else {
2433 		td->throtl_slice = DFL_THROTL_SLICE_HD;
2434 		td->filtered_latency = LATENCY_FILTERED_HD;
2435 		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2436 			td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
2437 			td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
2438 		}
2439 	}
2440 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2441 	/* if no low limit, use previous default */
2442 	td->throtl_slice = DFL_THROTL_SLICE_HD;
2443 #endif
2444 
2445 	td->track_bio_latency = !queue_is_mq(q);
2446 	if (!td->track_bio_latency)
2447 		blk_stat_enable_accounting(q);
2448 }
2449 
2450 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
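/*
 * throtl_slice is exposed in milliseconds through the request queue's
 * sysfs attribute ("throttle_sample_time" in mainline), e.g.
 * (illustrative, device name hypothetical):
 *
 *   cat /sys/block/sda/queue/throttle_sample_time
 *   echo 50 > /sys/block/sda/queue/throttle_sample_time
 *
 * Writes are rejected if the value rounds to 0 jiffies or exceeds
 * MAX_THROTL_SLICE (one second).
 */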
2451 ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
2452 {
2453 	if (!q->td)
2454 		return -EINVAL;
2455 	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
2456 }
2457 
2458 ssize_t blk_throtl_sample_time_store(struct request_queue *q,
2459 	const char *page, size_t count)
2460 {
2461 	unsigned long v;
2462 	unsigned long t;
2463 
2464 	if (!q->td)
2465 		return -EINVAL;
2466 	if (kstrtoul(page, 10, &v))
2467 		return -EINVAL;
2468 	t = msecs_to_jiffies(v);
2469 	if (t == 0 || t > MAX_THROTL_SLICE)
2470 		return -EINVAL;
2471 	q->td->throtl_slice = t;
2472 	return count;
2473 }
2474 #endif
2475 
2476 static int __init throtl_init(void)
2477 {
2478 	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
2479 	if (!kthrotld_workqueue)
2480 		panic("Failed to create kthrotld\n");
2481 
2482 	return blkcg_policy_register(&blkcg_policy_throtl);
2483 }
2484 
2485 module_init(throtl_init);
2486