xref: /openbmc/linux/block/blk-throttle.c (revision d4092d76)
1 /*
2  * Interface for controlling IO bandwidth on a request queue
3  *
4  * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
5  */
6 
7 #include <linux/module.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/bio.h>
11 #include <linux/blktrace_api.h>
12 #include <linux/blk-cgroup.h>
13 #include "blk.h"
14 
15 /* Max dispatch from a group in 1 round */
16 static int throtl_grp_quantum = 8;
17 
18 /* Total max dispatch from all groups in one round */
19 static int throtl_quantum = 32;
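/*
 * With the defaults above, a single dispatch round moves at most 32 bios in
 * total and at most 8 per group; throtl_dispatch_tg() further splits the
 * per-group budget roughly 75%/25% between reads and writes (6 reads, 2
 * writes).
 */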
20 
21 /* Throttling is performed over a slice and after that slice is renewed */
22 #define DFL_THROTL_SLICE_HD (HZ / 10)
23 #define DFL_THROTL_SLICE_SSD (HZ / 50)
24 #define MAX_THROTL_SLICE (HZ)
25 #define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
26 #define MIN_THROTL_BPS (320 * 1024)
27 #define MIN_THROTL_IOPS (10)
28 #define DFL_LATENCY_TARGET (-1L)
29 #define DFL_IDLE_THRESHOLD (0)
30 #define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
31 #define LATENCY_FILTERED_SSD (0)
32 /*
33  * For HD, very small latencies come from sequential IO. Such IO tells us
34  * nothing about whether it is being impacted by other IO, so we ignore it.
35  */
36 #define LATENCY_FILTERED_HD (1000L) /* 1ms */
37 
38 #define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
39 
40 static struct blkcg_policy blkcg_policy_throtl;
41 
42 /* A workqueue to queue throttle related work */
43 static struct workqueue_struct *kthrotld_workqueue;
44 
45 /*
46  * To implement hierarchical throttling, throtl_grps form a tree and bios
47  * are dispatched upwards level by level until they reach the top and get
48  * issued.  When dispatching bios from the children and local group at each
49  * level, if the bios are dispatched into a single bio_list, there's a risk
50  * of a local or child group which can queue many bios at once filling up
51  * the list, starving others.
52  *
53  * To avoid such starvation, dispatched bios are queued separately
54  * according to where they came from.  When they are again dispatched to
55  * the parent, they're popped in round-robin order so that no single source
56  * hogs the dispatch window.
57  *
58  * throtl_qnode is used to keep the queued bios separated by their sources.
59  * Bios are queued to throtl_qnode which in turn is queued to
60  * throtl_service_queue and then dispatched in round-robin order.
61  *
62  * It's also used to track the reference counts on blkg's.  A qnode always
63  * belongs to a throtl_grp and gets queued on itself or the parent, so
64  * incrementing the reference of the associated throtl_grp when a qnode is
65  * queued and decrementing when dequeued is enough to keep the whole blkg
66  * tree pinned while bios are in flight.
67  */
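/*
 * For example: if child groups A and B and the local group each have bios
 * queued on a parent's queued[] list, throtl_pop_queued() hands them out
 * A, B, local, A, B, local, ... rather than draining A completely before
 * B and the local group get a turn.
 */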
68 struct throtl_qnode {
69 	struct list_head	node;		/* service_queue->queued[] */
70 	struct bio_list		bios;		/* queued bios */
71 	struct throtl_grp	*tg;		/* tg this qnode belongs to */
72 };
73 
74 struct throtl_service_queue {
75 	struct throtl_service_queue *parent_sq;	/* the parent service_queue */
76 
77 	/*
78 	 * Bios queued directly to this service_queue or dispatched from
79 	 * children throtl_grp's.
80 	 */
81 	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
82 	unsigned int		nr_queued[2];	/* number of queued bios */
83 
84 	/*
85 	 * RB tree of active children throtl_grp's, which are sorted by
86 	 * their ->disptime.
87 	 */
88 	struct rb_root		pending_tree;	/* RB tree of active tgs */
89 	struct rb_node		*first_pending;	/* first node in the tree */
90 	unsigned int		nr_pending;	/* # queued in the tree */
91 	unsigned long		first_pending_disptime;	/* disptime of the first tg */
92 	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
93 };
94 
95 enum tg_state_flags {
96 	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
97 	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
98 };
99 
100 #define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
101 
102 enum {
103 	LIMIT_LOW,
104 	LIMIT_MAX,
105 	LIMIT_CNT,
106 };
107 
108 struct throtl_grp {
109 	/* must be the first member */
110 	struct blkg_policy_data pd;
111 
112 	/* active throtl group service_queue member */
113 	struct rb_node rb_node;
114 
115 	/* throtl_data this group belongs to */
116 	struct throtl_data *td;
117 
118 	/* this group's service queue */
119 	struct throtl_service_queue service_queue;
120 
121 	/*
122 	 * qnode_on_self is used when bios are directly queued to this
123 	 * throtl_grp so that local bios compete fairly with bios
124 	 * dispatched from children.  qnode_on_parent is used when bios are
125 	 * dispatched from this throtl_grp into its parent and will compete
126 	 * with the sibling qnode_on_parents and the parent's
127 	 * qnode_on_self.
128 	 */
129 	struct throtl_qnode qnode_on_self[2];
130 	struct throtl_qnode qnode_on_parent[2];
131 
132 	/*
133 	 * Dispatch time in jiffies. This is the estimated time when the group
134 	 * will unthrottle and be ready to dispatch more bios. It is used as the
135 	 * key to sort active groups in the service tree.
136 	 */
137 	unsigned long disptime;
138 
139 	unsigned int flags;
140 
141 	/* are there any throtl rules between this group and td? */
142 	bool has_rules[2];
143 
144 	/* internally used bytes per second rate limits */
145 	uint64_t bps[2][LIMIT_CNT];
146 	/* user configured bps limits */
147 	uint64_t bps_conf[2][LIMIT_CNT];
148 
149 	/* internally used IOPS limits */
150 	unsigned int iops[2][LIMIT_CNT];
151 	/* user configured IOPS limits */
152 	unsigned int iops_conf[2][LIMIT_CNT];
153 
154 	/* Number of bytes dispatched in current slice */
155 	uint64_t bytes_disp[2];
156 	/* Number of bio's dispatched in current slice */
157 	unsigned int io_disp[2];
158 
159 	unsigned long last_low_overflow_time[2];
160 
161 	uint64_t last_bytes_disp[2];
162 	unsigned int last_io_disp[2];
163 
164 	unsigned long last_check_time;
165 
166 	unsigned long latency_target; /* us */
167 	unsigned long latency_target_conf; /* us */
168 	/* When did we start a new slice */
169 	unsigned long slice_start[2];
170 	unsigned long slice_end[2];
171 
172 	unsigned long last_finish_time; /* ns / 1024 */
173 	unsigned long checked_last_finish_time; /* ns / 1024 */
174 	unsigned long avg_idletime; /* ns / 1024 */
175 	unsigned long idletime_threshold; /* us */
176 	unsigned long idletime_threshold_conf; /* us */
177 
178 	unsigned int bio_cnt; /* total bios */
179 	unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
180 	unsigned long bio_cnt_reset_time;
181 };
182 
183 /* We measure latency for request size from <= 4k to >= 1M */
184 #define LATENCY_BUCKET_SIZE 9
185 
186 struct latency_bucket {
187 	unsigned long total_latency; /* ns / 1024 */
188 	int samples;
189 };
190 
191 struct avg_latency_bucket {
192 	unsigned long latency; /* ns / 1024 */
193 	bool valid;
194 };
195 
196 struct throtl_data
197 {
198 	/* service tree for active throtl groups */
199 	struct throtl_service_queue service_queue;
200 
201 	struct request_queue *queue;
202 
203 	/* Total Number of queued bios on READ and WRITE lists */
204 	unsigned int nr_queued[2];
205 
206 	unsigned int throtl_slice;
207 
208 	/* Work for dispatching throttled bios */
209 	struct work_struct dispatch_work;
210 	unsigned int limit_index;
211 	bool limit_valid[LIMIT_CNT];
212 
213 	unsigned long low_upgrade_time;
214 	unsigned long low_downgrade_time;
215 
216 	unsigned int scale;
217 
218 	struct latency_bucket tmp_buckets[LATENCY_BUCKET_SIZE];
219 	struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
220 	struct latency_bucket __percpu *latency_buckets;
221 	unsigned long last_calculate_time;
222 	unsigned long filtered_latency;
223 
224 	bool track_bio_latency;
225 };
226 
227 static void throtl_pending_timer_fn(unsigned long arg);
228 
229 static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
230 {
231 	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
232 }
233 
234 static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
235 {
236 	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
237 }
238 
239 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
240 {
241 	return pd_to_blkg(&tg->pd);
242 }
243 
244 /**
245  * sq_to_tg - return the throtl_grp the specified service queue belongs to
246  * @sq: the throtl_service_queue of interest
247  *
248  * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
249  * embedded in throtl_data, %NULL is returned.
250  */
251 static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
252 {
253 	if (sq && sq->parent_sq)
254 		return container_of(sq, struct throtl_grp, service_queue);
255 	else
256 		return NULL;
257 }
258 
259 /**
260  * sq_to_td - return throtl_data the specified service queue belongs to
261  * @sq: the throtl_service_queue of interest
262  *
263  * A service_queue can be embedded in either a throtl_grp or throtl_data.
264  * Determine the associated throtl_data accordingly and return it.
265  */
266 static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
267 {
268 	struct throtl_grp *tg = sq_to_tg(sq);
269 
270 	if (tg)
271 		return tg->td;
272 	else
273 		return container_of(sq, struct throtl_data, service_queue);
274 }
275 
276 /*
277  * cgroup's limit in LIMIT_MAX is scaled if low limit is set. This scale is to
278  * make the IO dispatch more smooth.
279  * Scale up: linearly scale up according to the time elapsed since upgrade. For
280  *           every throtl_slice, the limit scales up by 1/2 of the .low limit
281  *           till the limit hits the .max limit.
282  * Scale down: exponentially scale down if a cgroup doesn't hit its .low limit
283  */
284 static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
285 {
286 	/* arbitrary value to avoid too big scale */
287 	if (td->scale < 4096 && time_after_eq(jiffies,
288 	    td->low_upgrade_time + td->scale * td->throtl_slice))
289 		td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;
290 
291 	return low + (low >> 1) * td->scale;
292 }
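/*
 * A worked example of the scaling above (illustrative numbers): with a .low
 * limit of 10 MB/s and three throtl_slices elapsed since the last upgrade
 * (td->scale == 3), the adjusted limit is 10 + (10 / 2) * 3 = 25 MB/s.  The
 * callers, tg_bps_limit() and tg_iops_limit(), then clamp the result to the
 * .max limit.
 */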
293 
294 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
295 {
296 	struct blkcg_gq *blkg = tg_to_blkg(tg);
297 	struct throtl_data *td;
298 	uint64_t ret;
299 
300 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
301 		return U64_MAX;
302 
303 	td = tg->td;
304 	ret = tg->bps[rw][td->limit_index];
305 	if (ret == 0 && td->limit_index == LIMIT_LOW) {
306 		/* intermediate node or iops isn't 0 */
307 		if (!list_empty(&blkg->blkcg->css.children) ||
308 		    tg->iops[rw][td->limit_index])
309 			return U64_MAX;
310 		else
311 			return MIN_THROTL_BPS;
312 	}
313 
314 	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
315 	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
316 		uint64_t adjusted;
317 
318 		adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
319 		ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
320 	}
321 	return ret;
322 }
323 
324 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
325 {
326 	struct blkcg_gq *blkg = tg_to_blkg(tg);
327 	struct throtl_data *td;
328 	unsigned int ret;
329 
330 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
331 		return UINT_MAX;
332 
333 	td = tg->td;
334 	ret = tg->iops[rw][td->limit_index];
335 	if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
336 		/* intermediate node or bps isn't 0 */
337 		if (!list_empty(&blkg->blkcg->css.children) ||
338 		    tg->bps[rw][td->limit_index])
339 			return UINT_MAX;
340 		else
341 			return MIN_THROTL_IOPS;
342 	}
343 
344 	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
345 	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
346 		uint64_t adjusted;
347 
348 		adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
349 		if (adjusted > UINT_MAX)
350 			adjusted = UINT_MAX;
351 		ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
352 	}
353 	return ret;
354 }
355 
356 #define request_bucket_index(sectors) \
357 	clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
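/*
 * Illustrative mapping: a 4k request (8 sectors) falls into bucket 0
 * (order_base_2(8) - 3 == 0), an 8k request into bucket 1, and anything of
 * 1M (2048 sectors) or more is clamped into the last bucket, 8.
 */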
358 
359 /**
360  * throtl_log - log debug message via blktrace
361  * @sq: the service_queue being reported
362  * @fmt: printf format string
363  * @args: printf args
364  *
365  * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
366  * throtl_grp; otherwise, just "throtl".
367  */
368 #define throtl_log(sq, fmt, args...)	do {				\
369 	struct throtl_grp *__tg = sq_to_tg((sq));			\
370 	struct throtl_data *__td = sq_to_td((sq));			\
371 									\
372 	(void)__td;							\
373 	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
374 		break;							\
375 	if ((__tg)) {							\
376 		char __pbuf[128];					\
377 									\
378 		blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));	\
379 		blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
380 	} else {							\
381 		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
382 	}								\
383 } while (0)
384 
385 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
386 {
387 	INIT_LIST_HEAD(&qn->node);
388 	bio_list_init(&qn->bios);
389 	qn->tg = tg;
390 }
391 
392 /**
393  * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
394  * @bio: bio being added
395  * @qn: qnode to add bio to
396  * @queued: the service_queue->queued[] list @qn belongs to
397  *
398  * Add @bio to @qn and put @qn on @queued if it's not already on.
399  * @qn->tg's reference count is bumped when @qn is activated.  See the
400  * comment on top of throtl_qnode definition for details.
401  */
402 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
403 				 struct list_head *queued)
404 {
405 	bio_list_add(&qn->bios, bio);
406 	if (list_empty(&qn->node)) {
407 		list_add_tail(&qn->node, queued);
408 		blkg_get(tg_to_blkg(qn->tg));
409 	}
410 }
411 
412 /**
413  * throtl_peek_queued - peek the first bio on a qnode list
414  * @queued: the qnode list to peek
415  */
416 static struct bio *throtl_peek_queued(struct list_head *queued)
417 {
418 	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
419 	struct bio *bio;
420 
421 	if (list_empty(queued))
422 		return NULL;
423 
424 	bio = bio_list_peek(&qn->bios);
425 	WARN_ON_ONCE(!bio);
426 	return bio;
427 }
428 
429 /**
430  * throtl_pop_queued - pop the first bio from a qnode list
431  * @queued: the qnode list to pop a bio from
432  * @tg_to_put: optional out argument for throtl_grp to put
433  *
434  * Pop the first bio from the qnode list @queued.  After popping, the first
435  * qnode is removed from @queued if empty or moved to the end of @queued so
436  * that the popping order is round-robin.
437  *
438  * When the first qnode is removed, its associated throtl_grp should be put
439  * too.  If @tg_to_put is NULL, this function automatically puts it;
440  * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
441  * responsible for putting it.
442  */
443 static struct bio *throtl_pop_queued(struct list_head *queued,
444 				     struct throtl_grp **tg_to_put)
445 {
446 	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
447 	struct bio *bio;
448 
449 	if (list_empty(queued))
450 		return NULL;
451 
452 	bio = bio_list_pop(&qn->bios);
453 	WARN_ON_ONCE(!bio);
454 
455 	if (bio_list_empty(&qn->bios)) {
456 		list_del_init(&qn->node);
457 		if (tg_to_put)
458 			*tg_to_put = qn->tg;
459 		else
460 			blkg_put(tg_to_blkg(qn->tg));
461 	} else {
462 		list_move_tail(&qn->node, queued);
463 	}
464 
465 	return bio;
466 }
467 
468 /* init a service_queue, assumes the caller zeroed it */
469 static void throtl_service_queue_init(struct throtl_service_queue *sq)
470 {
471 	INIT_LIST_HEAD(&sq->queued[0]);
472 	INIT_LIST_HEAD(&sq->queued[1]);
473 	sq->pending_tree = RB_ROOT;
474 	setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
475 		    (unsigned long)sq);
476 }
477 
478 static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
479 {
480 	struct throtl_grp *tg;
481 	int rw;
482 
483 	tg = kzalloc_node(sizeof(*tg), gfp, node);
484 	if (!tg)
485 		return NULL;
486 
487 	throtl_service_queue_init(&tg->service_queue);
488 
489 	for (rw = READ; rw <= WRITE; rw++) {
490 		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
491 		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
492 	}
493 
494 	RB_CLEAR_NODE(&tg->rb_node);
495 	tg->bps[READ][LIMIT_MAX] = U64_MAX;
496 	tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
497 	tg->iops[READ][LIMIT_MAX] = UINT_MAX;
498 	tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
499 	tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
500 	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
501 	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
502 	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
503 	/* LIMIT_LOW will have default value 0 */
504 
505 	tg->latency_target = DFL_LATENCY_TARGET;
506 	tg->latency_target_conf = DFL_LATENCY_TARGET;
507 	tg->idletime_threshold = DFL_IDLE_THRESHOLD;
508 	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
509 
510 	return &tg->pd;
511 }
512 
513 static void throtl_pd_init(struct blkg_policy_data *pd)
514 {
515 	struct throtl_grp *tg = pd_to_tg(pd);
516 	struct blkcg_gq *blkg = tg_to_blkg(tg);
517 	struct throtl_data *td = blkg->q->td;
518 	struct throtl_service_queue *sq = &tg->service_queue;
519 
520 	/*
521 	 * If on the default hierarchy, we switch to properly hierarchical
522 	 * behavior where limits on a given throtl_grp are applied to the
523 	 * whole subtree rather than just the group itself.  e.g. If 16M
524 	 * read_bps limit is set on the root group, the whole system can't
525 	 * exceed 16M for the device.
526 	 *
527 	 * If not on the default hierarchy, the broken flat hierarchy
528 	 * behavior is retained where all throtl_grps are treated as if
529 	 * they're all separate root groups right below throtl_data.
530 	 * Limits of a group don't interact with limits of other groups
531 	 * regardless of the position of the group in the hierarchy.
532 	 */
533 	sq->parent_sq = &td->service_queue;
534 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
535 		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
536 	tg->td = td;
537 }
538 
539 /*
540  * Set has_rules[] if @tg or any of its parents have limits configured.
541  * This doesn't require walking up to the top of the hierarchy as the
542  * parent's has_rules[] is guaranteed to be correct.
543  */
544 static void tg_update_has_rules(struct throtl_grp *tg)
545 {
546 	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
547 	struct throtl_data *td = tg->td;
548 	int rw;
549 
550 	for (rw = READ; rw <= WRITE; rw++)
551 		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
552 			(td->limit_valid[td->limit_index] &&
553 			 (tg_bps_limit(tg, rw) != U64_MAX ||
554 			  tg_iops_limit(tg, rw) != UINT_MAX));
555 }
556 
557 static void throtl_pd_online(struct blkg_policy_data *pd)
558 {
559 	struct throtl_grp *tg = pd_to_tg(pd);
560 	/*
561 	 * We don't want new groups to escape the limits of their ancestors.
562 	 * Update has_rules[] after a new group is brought online.
563 	 */
564 	tg_update_has_rules(tg);
565 }
566 
567 static void blk_throtl_update_limit_valid(struct throtl_data *td)
568 {
569 	struct cgroup_subsys_state *pos_css;
570 	struct blkcg_gq *blkg;
571 	bool low_valid = false;
572 
573 	rcu_read_lock();
574 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
575 		struct throtl_grp *tg = blkg_to_tg(blkg);
576 
577 		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
578 		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
579 			low_valid = true;
580 	}
581 	rcu_read_unlock();
582 
583 	td->limit_valid[LIMIT_LOW] = low_valid;
584 }
585 
586 static void throtl_upgrade_state(struct throtl_data *td);
587 static void throtl_pd_offline(struct blkg_policy_data *pd)
588 {
589 	struct throtl_grp *tg = pd_to_tg(pd);
590 
591 	tg->bps[READ][LIMIT_LOW] = 0;
592 	tg->bps[WRITE][LIMIT_LOW] = 0;
593 	tg->iops[READ][LIMIT_LOW] = 0;
594 	tg->iops[WRITE][LIMIT_LOW] = 0;
595 
596 	blk_throtl_update_limit_valid(tg->td);
597 
598 	if (!tg->td->limit_valid[tg->td->limit_index])
599 		throtl_upgrade_state(tg->td);
600 }
601 
602 static void throtl_pd_free(struct blkg_policy_data *pd)
603 {
604 	struct throtl_grp *tg = pd_to_tg(pd);
605 
606 	del_timer_sync(&tg->service_queue.pending_timer);
607 	kfree(tg);
608 }
609 
610 static struct throtl_grp *
611 throtl_rb_first(struct throtl_service_queue *parent_sq)
612 {
613 	/* Service tree is empty */
614 	if (!parent_sq->nr_pending)
615 		return NULL;
616 
617 	if (!parent_sq->first_pending)
618 		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);
619 
620 	if (parent_sq->first_pending)
621 		return rb_entry_tg(parent_sq->first_pending);
622 
623 	return NULL;
624 }
625 
626 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
627 {
628 	rb_erase(n, root);
629 	RB_CLEAR_NODE(n);
630 }
631 
632 static void throtl_rb_erase(struct rb_node *n,
633 			    struct throtl_service_queue *parent_sq)
634 {
635 	if (parent_sq->first_pending == n)
636 		parent_sq->first_pending = NULL;
637 	rb_erase_init(n, &parent_sq->pending_tree);
638 	--parent_sq->nr_pending;
639 }
640 
641 static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
642 {
643 	struct throtl_grp *tg;
644 
645 	tg = throtl_rb_first(parent_sq);
646 	if (!tg)
647 		return;
648 
649 	parent_sq->first_pending_disptime = tg->disptime;
650 }
651 
652 static void tg_service_queue_add(struct throtl_grp *tg)
653 {
654 	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
655 	struct rb_node **node = &parent_sq->pending_tree.rb_node;
656 	struct rb_node *parent = NULL;
657 	struct throtl_grp *__tg;
658 	unsigned long key = tg->disptime;
659 	int left = 1;
660 
661 	while (*node != NULL) {
662 		parent = *node;
663 		__tg = rb_entry_tg(parent);
664 
665 		if (time_before(key, __tg->disptime))
666 			node = &parent->rb_left;
667 		else {
668 			node = &parent->rb_right;
669 			left = 0;
670 		}
671 	}
672 
673 	if (left)
674 		parent_sq->first_pending = &tg->rb_node;
675 
676 	rb_link_node(&tg->rb_node, parent, node);
677 	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
678 }
679 
680 static void __throtl_enqueue_tg(struct throtl_grp *tg)
681 {
682 	tg_service_queue_add(tg);
683 	tg->flags |= THROTL_TG_PENDING;
684 	tg->service_queue.parent_sq->nr_pending++;
685 }
686 
687 static void throtl_enqueue_tg(struct throtl_grp *tg)
688 {
689 	if (!(tg->flags & THROTL_TG_PENDING))
690 		__throtl_enqueue_tg(tg);
691 }
692 
693 static void __throtl_dequeue_tg(struct throtl_grp *tg)
694 {
695 	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
696 	tg->flags &= ~THROTL_TG_PENDING;
697 }
698 
699 static void throtl_dequeue_tg(struct throtl_grp *tg)
700 {
701 	if (tg->flags & THROTL_TG_PENDING)
702 		__throtl_dequeue_tg(tg);
703 }
704 
705 /* Call with queue lock held */
706 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
707 					  unsigned long expires)
708 {
709 	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
710 
711 	/*
712 	 * Since we are adjusting the throttle limit dynamically, the sleep
713 	 * time calculated according to the previous limit might be invalid. It's
714 	 * possible the cgroup sleep time is very long and no other cgroups
715 	 * have IO running, so nothing would notify it of the limit change. Make
716 	 * sure the cgroup doesn't sleep too long and miss that notification.
717 	 */
718 	if (time_after(expires, max_expire))
719 		expires = max_expire;
720 	mod_timer(&sq->pending_timer, expires);
721 	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
722 		   expires - jiffies, jiffies);
723 }
724 
725 /**
726  * throtl_schedule_next_dispatch - schedule the next dispatch cycle
727  * @sq: the service_queue to schedule dispatch for
728  * @force: force scheduling
729  *
730  * Arm @sq->pending_timer so that the next dispatch cycle starts on the
731  * dispatch time of the first pending child.  Returns %true if either timer
732  * is armed or there's no pending child left.  %false if the current
733  * dispatch window is still open and the caller should continue
734  * dispatching.
735  *
736  * If @force is %true, the dispatch timer is always scheduled and this
737  * function is guaranteed to return %true.  This is to be used when the
738  * caller can't dispatch itself and needs to invoke pending_timer
739  * unconditionally.  Note that forced scheduling is likely to induce short
740  * delay before dispatch starts even if @sq->first_pending_disptime is not
741  * in the future and thus shouldn't be used in hot paths.
742  */
743 static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
744 					  bool force)
745 {
746 	/* any pending children left? */
747 	if (!sq->nr_pending)
748 		return true;
749 
750 	update_min_dispatch_time(sq);
751 
752 	/* is the next dispatch time in the future? */
753 	if (force || time_after(sq->first_pending_disptime, jiffies)) {
754 		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
755 		return true;
756 	}
757 
758 	/* tell the caller to continue dispatching */
759 	return false;
760 }
761 
762 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
763 		bool rw, unsigned long start)
764 {
765 	tg->bytes_disp[rw] = 0;
766 	tg->io_disp[rw] = 0;
767 
768 	/*
769 	 * Previous slice has expired. We must have trimmed it after last
770 	 * bio dispatch. That means since start of last slice, we never used
771 	 * that bandwidth. Do try to make use of that bandwidth while giving
772 	 * credit.
773 	 */
774 	if (time_after_eq(start, tg->slice_start[rw]))
775 		tg->slice_start[rw] = start;
776 
777 	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
778 	throtl_log(&tg->service_queue,
779 		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
780 		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
781 		   tg->slice_end[rw], jiffies);
782 }
783 
784 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
785 {
786 	tg->bytes_disp[rw] = 0;
787 	tg->io_disp[rw] = 0;
788 	tg->slice_start[rw] = jiffies;
789 	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
790 	throtl_log(&tg->service_queue,
791 		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
792 		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
793 		   tg->slice_end[rw], jiffies);
794 }
795 
796 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
797 					unsigned long jiffy_end)
798 {
799 	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
800 }
801 
802 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
803 				       unsigned long jiffy_end)
804 {
805 	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
806 	throtl_log(&tg->service_queue,
807 		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
808 		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
809 		   tg->slice_end[rw], jiffies);
810 }
811 
812 /* Determine if previously allocated or extended slice is complete or not */
813 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
814 {
815 	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
816 		return false;
817 
818 	return true;
819 }
820 
821 /* Trim the used slices and adjust slice start accordingly */
822 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
823 {
824 	unsigned long nr_slices, time_elapsed, io_trim;
825 	u64 bytes_trim, tmp;
826 
827 	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
828 
829 	/*
830 	 * If bps is unlimited (-1), then the time slice doesn't get
831 	 * renewed. Don't try to trim the slice if the slice is used up. A new
832 	 * slice will start when appropriate.
833 	 */
834 	if (throtl_slice_used(tg, rw))
835 		return;
836 
837 	/*
838 	 * A bio has been dispatched. Also adjust slice_end. It might happen
839 	 * that initially the cgroup limit was very low, resulting in a high
840 	 * slice_end, but later the limit was bumped up and the bio was dispatched
841 	 * sooner; then we need to reduce slice_end. A high bogus slice_end
842 	 * is bad because it does not allow a new slice to start.
843 	 */
844 
845 	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
846 
847 	time_elapsed = jiffies - tg->slice_start[rw];
848 
849 	nr_slices = time_elapsed / tg->td->throtl_slice;
850 
851 	if (!nr_slices)
852 		return;
853 	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
854 	do_div(tmp, HZ);
855 	bytes_trim = tmp;
856 
857 	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
858 		HZ;
859 
860 	if (!bytes_trim && !io_trim)
861 		return;
862 
863 	if (tg->bytes_disp[rw] >= bytes_trim)
864 		tg->bytes_disp[rw] -= bytes_trim;
865 	else
866 		tg->bytes_disp[rw] = 0;
867 
868 	if (tg->io_disp[rw] >= io_trim)
869 		tg->io_disp[rw] -= io_trim;
870 	else
871 		tg->io_disp[rw] = 0;
872 
873 	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
874 
875 	throtl_log(&tg->service_queue,
876 		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
877 		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
878 		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
879 }
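/*
 * Illustrative example of the trim above (hypothetical numbers, HZ == 1000,
 * throtl_slice == 100 jiffies): with a bps limit of 1 MB/s and 250 jiffies
 * since slice_start, nr_slices is 2, bytes_trim is 1048576 * 100 * 2 / 1000
 * (roughly 200 KB), that much is forgiven from bytes_disp, and slice_start
 * advances by 200 jiffies.
 */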
880 
881 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
882 				  unsigned long *wait)
883 {
884 	bool rw = bio_data_dir(bio);
885 	unsigned int io_allowed;
886 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
887 	u64 tmp;
888 
889 	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
890 
891 	/* Slice has just started. Consider one slice interval */
892 	if (!jiffy_elapsed)
893 		jiffy_elapsed_rnd = tg->td->throtl_slice;
894 
895 	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
896 
897 	/*
898 	 * jiffy_elapsed_rnd should not be a big value: the minimum iops is 1,
899 	 * so at most jiffy_elapsed should be equivalent to 1 second, as we
900 	 * will allow dispatch after 1 second and by then the slice should
901 	 * have been trimmed.
902 	 */
903 
904 	tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
905 	do_div(tmp, HZ);
906 
907 	if (tmp > UINT_MAX)
908 		io_allowed = UINT_MAX;
909 	else
910 		io_allowed = tmp;
911 
912 	if (tg->io_disp[rw] + 1 <= io_allowed) {
913 		if (wait)
914 			*wait = 0;
915 		return true;
916 	}
917 
918 	/* Calc approx time to dispatch */
919 	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1;
920 
921 	if (jiffy_wait > jiffy_elapsed)
922 		jiffy_wait = jiffy_wait - jiffy_elapsed;
923 	else
924 		jiffy_wait = 1;
925 
926 	if (wait)
927 		*wait = jiffy_wait;
928 	return false;
929 }
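/*
 * Illustrative example for the iops check above (hypothetical numbers,
 * HZ == 1000, throtl_slice == 100 jiffies): with an iops limit of 100 and
 * 50 jiffies elapsed (rounded up to 100), io_allowed is 100 * 100 / 1000 = 10.
 * If 10 bios were already dispatched, the next one has to wait roughly
 * (11 * 1000 / 100 + 1) - 50 = 61 jiffies.
 */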
930 
931 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
932 				 unsigned long *wait)
933 {
934 	bool rw = bio_data_dir(bio);
935 	u64 bytes_allowed, extra_bytes, tmp;
936 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
937 
938 	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
939 
940 	/* Slice has just started. Consider one slice interval */
941 	if (!jiffy_elapsed)
942 		jiffy_elapsed_rnd = tg->td->throtl_slice;
943 
944 	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
945 
946 	tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
947 	do_div(tmp, HZ);
948 	bytes_allowed = tmp;
949 
950 	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
951 		if (wait)
952 			*wait = 0;
953 		return true;
954 	}
955 
956 	/* Calc approx time to dispatch */
957 	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
958 	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
959 
960 	if (!jiffy_wait)
961 		jiffy_wait = 1;
962 
963 	/*
964 	 * This wait time does not take into consideration the rounding
965 	 * up we did above. Add that time as well.
966 	 */
967 	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
968 	if (wait)
969 		*wait = jiffy_wait;
970 	return false;
971 }
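/*
 * Illustrative example for the bps check above (hypothetical numbers,
 * HZ == 1000, throtl_slice == 100 jiffies): with a bps limit of 1 MB/s and
 * 100 jiffies of (rounded) elapsed time, bytes_allowed is 1048576 * 100 / 1000,
 * about 102 KB.  A 256 KB bio overshoots by roughly 154 KB and must wait
 * about 150 jiffies, plus the rounding-up delta added above.
 */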
972 
973 /*
974  * Returns whether one can dispatch a bio or not. Also returns the approximate
975  * number of jiffies to wait before this bio is within the IO rate and can be dispatched
976  */
977 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
978 			    unsigned long *wait)
979 {
980 	bool rw = bio_data_dir(bio);
981 	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
982 
983 	/*
984 	 * Currently the whole state machine of the group depends on the first
985 	 * bio queued in the group's bio list. So one should not call this
986 	 * function with a different bio if there are other bios
987 	 * queued.
988 	 */
989 	BUG_ON(tg->service_queue.nr_queued[rw] &&
990 	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
991 
992 	/* If tg->bps = -1, then BW is unlimited */
993 	if (tg_bps_limit(tg, rw) == U64_MAX &&
994 	    tg_iops_limit(tg, rw) == UINT_MAX) {
995 		if (wait)
996 			*wait = 0;
997 		return true;
998 	}
999 
1000 	/*
1001 	 * If previous slice expired, start a new one otherwise renew/extend
1002 	 * existing slice to make sure it is at least throtl_slice interval
1003 	 * long since now. New slice is started only for empty throttle group.
1004 	 * If there is queued bio, that means there should be an active
1005 	 * slice and it should be extended instead.
1006 	 */
1007 	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
1008 		throtl_start_new_slice(tg, rw);
1009 	else {
1010 		if (time_before(tg->slice_end[rw],
1011 		    jiffies + tg->td->throtl_slice))
1012 			throtl_extend_slice(tg, rw,
1013 				jiffies + tg->td->throtl_slice);
1014 	}
1015 
1016 	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
1017 	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
1018 		if (wait)
1019 			*wait = 0;
1020 		return true;
1021 	}
1022 
1023 	max_wait = max(bps_wait, iops_wait);
1024 
1025 	if (wait)
1026 		*wait = max_wait;
1027 
1028 	if (time_before(tg->slice_end[rw], jiffies + max_wait))
1029 		throtl_extend_slice(tg, rw, jiffies + max_wait);
1030 
1031 	return false;
1032 }
1033 
1034 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
1035 {
1036 	bool rw = bio_data_dir(bio);
1037 
1038 	/* Charge the bio to the group */
1039 	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
1040 	tg->io_disp[rw]++;
1041 	tg->last_bytes_disp[rw] += bio->bi_iter.bi_size;
1042 	tg->last_io_disp[rw]++;
1043 
1044 	/*
1045 	 * BIO_THROTTLED is used to prevent the same bio from being throttled
1046 	 * more than once as a throttled bio will go through blk-throtl the
1047 	 * second time when it eventually gets issued.  Set it when a bio
1048 	 * is being charged to a tg.
1049 	 */
1050 	if (!bio_flagged(bio, BIO_THROTTLED))
1051 		bio_set_flag(bio, BIO_THROTTLED);
1052 }
1053 
1054 /**
1055  * throtl_add_bio_tg - add a bio to the specified throtl_grp
1056  * @bio: bio to add
1057  * @qn: qnode to use
1058  * @tg: the target throtl_grp
1059  *
1060  * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
1061  * tg->qnode_on_self[] is used.
1062  */
1063 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
1064 			      struct throtl_grp *tg)
1065 {
1066 	struct throtl_service_queue *sq = &tg->service_queue;
1067 	bool rw = bio_data_dir(bio);
1068 
1069 	if (!qn)
1070 		qn = &tg->qnode_on_self[rw];
1071 
1072 	/*
1073 	 * If @tg doesn't currently have any bios queued in the same
1074 	 * direction, queueing @bio can change when @tg should be
1075 	 * dispatched.  Mark that @tg was empty.  This is automatically
1076 	 * cleared on the next tg_update_disptime().
1077 	 */
1078 	if (!sq->nr_queued[rw])
1079 		tg->flags |= THROTL_TG_WAS_EMPTY;
1080 
1081 	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
1082 
1083 	sq->nr_queued[rw]++;
1084 	throtl_enqueue_tg(tg);
1085 }
1086 
1087 static void tg_update_disptime(struct throtl_grp *tg)
1088 {
1089 	struct throtl_service_queue *sq = &tg->service_queue;
1090 	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
1091 	struct bio *bio;
1092 
1093 	bio = throtl_peek_queued(&sq->queued[READ]);
1094 	if (bio)
1095 		tg_may_dispatch(tg, bio, &read_wait);
1096 
1097 	bio = throtl_peek_queued(&sq->queued[WRITE]);
1098 	if (bio)
1099 		tg_may_dispatch(tg, bio, &write_wait);
1100 
1101 	min_wait = min(read_wait, write_wait);
1102 	disptime = jiffies + min_wait;
1103 
1104 	/* Update dispatch time */
1105 	throtl_dequeue_tg(tg);
1106 	tg->disptime = disptime;
1107 	throtl_enqueue_tg(tg);
1108 
1109 	/* see throtl_add_bio_tg() */
1110 	tg->flags &= ~THROTL_TG_WAS_EMPTY;
1111 }
1112 
1113 static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
1114 					struct throtl_grp *parent_tg, bool rw)
1115 {
1116 	if (throtl_slice_used(parent_tg, rw)) {
1117 		throtl_start_new_slice_with_credit(parent_tg, rw,
1118 				child_tg->slice_start[rw]);
1119 	}
1120 
1121 }
1122 
1123 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1124 {
1125 	struct throtl_service_queue *sq = &tg->service_queue;
1126 	struct throtl_service_queue *parent_sq = sq->parent_sq;
1127 	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
1128 	struct throtl_grp *tg_to_put = NULL;
1129 	struct bio *bio;
1130 
1131 	/*
1132 	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
1133 	 * from @tg may put its reference and @parent_sq might end up
1134 	 * getting released prematurely.  Remember the tg to put and put it
1135 	 * after @bio is transferred to @parent_sq.
1136 	 */
1137 	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
1138 	sq->nr_queued[rw]--;
1139 
1140 	throtl_charge_bio(tg, bio);
1141 
1142 	/*
1143 	 * If our parent is another tg, we just need to transfer @bio to
1144 	 * the parent using throtl_add_bio_tg().  If our parent is
1145 	 * @td->service_queue, @bio is ready to be issued.  Put it on its
1146 	 * bio_lists[] and decrease total number queued.  The caller is
1147 	 * responsible for issuing these bios.
1148 	 */
1149 	if (parent_tg) {
1150 		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1151 		start_parent_slice_with_credit(tg, parent_tg, rw);
1152 	} else {
1153 		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1154 				     &parent_sq->queued[rw]);
1155 		BUG_ON(tg->td->nr_queued[rw] <= 0);
1156 		tg->td->nr_queued[rw]--;
1157 	}
1158 
1159 	throtl_trim_slice(tg, rw);
1160 
1161 	if (tg_to_put)
1162 		blkg_put(tg_to_blkg(tg_to_put));
1163 }
1164 
1165 static int throtl_dispatch_tg(struct throtl_grp *tg)
1166 {
1167 	struct throtl_service_queue *sq = &tg->service_queue;
1168 	unsigned int nr_reads = 0, nr_writes = 0;
1169 	unsigned int max_nr_reads = throtl_grp_quantum * 3 / 4;
1170 	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
1171 	struct bio *bio;
1172 
1173 	/* Try to dispatch 75% READS and 25% WRITES */
1174 
1175 	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1176 	       tg_may_dispatch(tg, bio, NULL)) {
1177 
1178 		tg_dispatch_one_bio(tg, bio_data_dir(bio));
1179 		nr_reads++;
1180 
1181 		if (nr_reads >= max_nr_reads)
1182 			break;
1183 	}
1184 
1185 	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1186 	       tg_may_dispatch(tg, bio, NULL)) {
1187 
1188 		tg_dispatch_one_bio(tg, bio_data_dir(bio));
1189 		nr_writes++;
1190 
1191 		if (nr_writes >= max_nr_writes)
1192 			break;
1193 	}
1194 
1195 	return nr_reads + nr_writes;
1196 }
1197 
1198 static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
1199 {
1200 	unsigned int nr_disp = 0;
1201 
1202 	while (1) {
1203 		struct throtl_grp *tg = throtl_rb_first(parent_sq);
1204 		struct throtl_service_queue *sq;
1205 
1206 		if (!tg)
1207 			break;
1208 		sq = &tg->service_queue;
1209 		if (time_before(jiffies, tg->disptime))
1210 			break;
1211 
1212 		throtl_dequeue_tg(tg);
1213 
1214 		nr_disp += throtl_dispatch_tg(tg);
1215 
1216 		if (sq->nr_queued[0] || sq->nr_queued[1])
1217 			tg_update_disptime(tg);
1218 
1219 		if (nr_disp >= throtl_quantum)
1220 			break;
1221 	}
1222 
1223 	return nr_disp;
1224 }
1225 
1226 static bool throtl_can_upgrade(struct throtl_data *td,
1227 	struct throtl_grp *this_tg);
1228 /**
1229  * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1230  * @arg: the throtl_service_queue being serviced
1231  *
1232  * This timer is armed when a child throtl_grp with active bios becomes
1233  * pending and is queued on the service_queue's pending_tree, and expires when
1234  * the first child throtl_grp should be dispatched.  This function
1235  * dispatches bios from the children throtl_grps to the parent
1236  * service_queue.
1237  *
1238  * If the parent's parent is another throtl_grp, dispatching is propagated
1239  * by either arming its pending_timer or repeating dispatch directly.  If
1240  * the top-level service_tree is reached, throtl_data->dispatch_work is
1241  * kicked so that the ready bio's are issued.
1242  */
1243 static void throtl_pending_timer_fn(unsigned long arg)
1244 {
1245 	struct throtl_service_queue *sq = (void *)arg;
1246 	struct throtl_grp *tg = sq_to_tg(sq);
1247 	struct throtl_data *td = sq_to_td(sq);
1248 	struct request_queue *q = td->queue;
1249 	struct throtl_service_queue *parent_sq;
1250 	bool dispatched;
1251 	int ret;
1252 
1253 	spin_lock_irq(q->queue_lock);
1254 	if (throtl_can_upgrade(td, NULL))
1255 		throtl_upgrade_state(td);
1256 
1257 again:
1258 	parent_sq = sq->parent_sq;
1259 	dispatched = false;
1260 
1261 	while (true) {
1262 		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1263 			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
1264 			   sq->nr_queued[READ], sq->nr_queued[WRITE]);
1265 
1266 		ret = throtl_select_dispatch(sq);
1267 		if (ret) {
1268 			throtl_log(sq, "bios disp=%u", ret);
1269 			dispatched = true;
1270 		}
1271 
1272 		if (throtl_schedule_next_dispatch(sq, false))
1273 			break;
1274 
1275 		/* this dispatch window is still open, relax and repeat */
1276 		spin_unlock_irq(q->queue_lock);
1277 		cpu_relax();
1278 		spin_lock_irq(q->queue_lock);
1279 	}
1280 
1281 	if (!dispatched)
1282 		goto out_unlock;
1283 
1284 	if (parent_sq) {
1285 		/* @parent_sq is another throtl_grp, propagate dispatch */
1286 		if (tg->flags & THROTL_TG_WAS_EMPTY) {
1287 			tg_update_disptime(tg);
1288 			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1289 				/* window is already open, repeat dispatching */
1290 				sq = parent_sq;
1291 				tg = sq_to_tg(sq);
1292 				goto again;
1293 			}
1294 		}
1295 	} else {
1296 		/* reached the top-level, queue issuing */
1297 		queue_work(kthrotld_workqueue, &td->dispatch_work);
1298 	}
1299 out_unlock:
1300 	spin_unlock_irq(q->queue_lock);
1301 }
1302 
1303 /**
1304  * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1305  * @work: work item being executed
1306  *
1307  * This function is queued for execution when bio's reach the bio_lists[]
1308  * of throtl_data->service_queue.  Those bio's are ready and issued by this
1309  * function.
1310  */
1311 static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1312 {
1313 	struct throtl_data *td = container_of(work, struct throtl_data,
1314 					      dispatch_work);
1315 	struct throtl_service_queue *td_sq = &td->service_queue;
1316 	struct request_queue *q = td->queue;
1317 	struct bio_list bio_list_on_stack;
1318 	struct bio *bio;
1319 	struct blk_plug plug;
1320 	int rw;
1321 
1322 	bio_list_init(&bio_list_on_stack);
1323 
1324 	spin_lock_irq(q->queue_lock);
1325 	for (rw = READ; rw <= WRITE; rw++)
1326 		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1327 			bio_list_add(&bio_list_on_stack, bio);
1328 	spin_unlock_irq(q->queue_lock);
1329 
1330 	if (!bio_list_empty(&bio_list_on_stack)) {
1331 		blk_start_plug(&plug);
1332 		while ((bio = bio_list_pop(&bio_list_on_stack)))
1333 			generic_make_request(bio);
1334 		blk_finish_plug(&plug);
1335 	}
1336 }
1337 
1338 static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1339 			      int off)
1340 {
1341 	struct throtl_grp *tg = pd_to_tg(pd);
1342 	u64 v = *(u64 *)((void *)tg + off);
1343 
1344 	if (v == U64_MAX)
1345 		return 0;
1346 	return __blkg_prfill_u64(sf, pd, v);
1347 }
1348 
1349 static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1350 			       int off)
1351 {
1352 	struct throtl_grp *tg = pd_to_tg(pd);
1353 	unsigned int v = *(unsigned int *)((void *)tg + off);
1354 
1355 	if (v == UINT_MAX)
1356 		return 0;
1357 	return __blkg_prfill_u64(sf, pd, v);
1358 }
1359 
1360 static int tg_print_conf_u64(struct seq_file *sf, void *v)
1361 {
1362 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1363 			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1364 	return 0;
1365 }
1366 
1367 static int tg_print_conf_uint(struct seq_file *sf, void *v)
1368 {
1369 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1370 			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1371 	return 0;
1372 }
1373 
1374 static void tg_conf_updated(struct throtl_grp *tg, bool global)
1375 {
1376 	struct throtl_service_queue *sq = &tg->service_queue;
1377 	struct cgroup_subsys_state *pos_css;
1378 	struct blkcg_gq *blkg;
1379 
1380 	throtl_log(&tg->service_queue,
1381 		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1382 		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1383 		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1384 
1385 	/*
1386 	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
1387 	 * considered to have rules if either the tg itself or any of its
1388 	 * ancestors has rules.  This identifies groups without any
1389 	 * restrictions in the whole hierarchy and allows them to bypass
1390 	 * blk-throttle.
1391 	 */
1392 	blkg_for_each_descendant_pre(blkg, pos_css,
1393 			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1394 		struct throtl_grp *this_tg = blkg_to_tg(blkg);
1395 		struct throtl_grp *parent_tg;
1396 
1397 		tg_update_has_rules(this_tg);
1398 		/* ignore root/second level */
1399 		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1400 		    !blkg->parent->parent)
1401 			continue;
1402 		parent_tg = blkg_to_tg(blkg->parent);
1403 		/*
1404 		 * make sure all children have a lower idle time threshold and
1405 		 * a higher latency target
1406 		 */
1407 		this_tg->idletime_threshold = min(this_tg->idletime_threshold,
1408 				parent_tg->idletime_threshold);
1409 		this_tg->latency_target = max(this_tg->latency_target,
1410 				parent_tg->latency_target);
1411 	}
1412 
1413 	/*
1414 	 * We're already holding queue_lock and know @tg is valid.  Let's
1415 	 * apply the new config directly.
1416 	 *
1417 	 * Restart the slices for both READ and WRITE. It might happen
1418 	 * that a group's limits are dropped suddenly and we don't want to
1419 	 * account recently dispatched IO against the new low rate.
1420 	 */
1421 	throtl_start_new_slice(tg, 0);
1422 	throtl_start_new_slice(tg, 1);
1423 
1424 	if (tg->flags & THROTL_TG_PENDING) {
1425 		tg_update_disptime(tg);
1426 		throtl_schedule_next_dispatch(sq->parent_sq, true);
1427 	}
1428 }
1429 
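/*
 * tg_set_conf() handles writes to the legacy per-device files (e.g.
 * blkio.throttle.read_bps_device).  The expected input is "MAJ:MIN VALUE",
 * for example "8:16 1048576"; writing 0 clears the limit (stored internally
 * as "unlimited").
 */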
1430 static ssize_t tg_set_conf(struct kernfs_open_file *of,
1431 			   char *buf, size_t nbytes, loff_t off, bool is_u64)
1432 {
1433 	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1434 	struct blkg_conf_ctx ctx;
1435 	struct throtl_grp *tg;
1436 	int ret;
1437 	u64 v;
1438 
1439 	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1440 	if (ret)
1441 		return ret;
1442 
1443 	ret = -EINVAL;
1444 	if (sscanf(ctx.body, "%llu", &v) != 1)
1445 		goto out_finish;
1446 	if (!v)
1447 		v = U64_MAX;
1448 
1449 	tg = blkg_to_tg(ctx.blkg);
1450 
1451 	if (is_u64)
1452 		*(u64 *)((void *)tg + of_cft(of)->private) = v;
1453 	else
1454 		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1455 
1456 	tg_conf_updated(tg, false);
1457 	ret = 0;
1458 out_finish:
1459 	blkg_conf_finish(&ctx);
1460 	return ret ?: nbytes;
1461 }
1462 
1463 static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1464 			       char *buf, size_t nbytes, loff_t off)
1465 {
1466 	return tg_set_conf(of, buf, nbytes, off, true);
1467 }
1468 
1469 static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1470 				char *buf, size_t nbytes, loff_t off)
1471 {
1472 	return tg_set_conf(of, buf, nbytes, off, false);
1473 }
1474 
1475 static struct cftype throtl_legacy_files[] = {
1476 	{
1477 		.name = "throttle.read_bps_device",
1478 		.private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
1479 		.seq_show = tg_print_conf_u64,
1480 		.write = tg_set_conf_u64,
1481 	},
1482 	{
1483 		.name = "throttle.write_bps_device",
1484 		.private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1485 		.seq_show = tg_print_conf_u64,
1486 		.write = tg_set_conf_u64,
1487 	},
1488 	{
1489 		.name = "throttle.read_iops_device",
1490 		.private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
1491 		.seq_show = tg_print_conf_uint,
1492 		.write = tg_set_conf_uint,
1493 	},
1494 	{
1495 		.name = "throttle.write_iops_device",
1496 		.private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1497 		.seq_show = tg_print_conf_uint,
1498 		.write = tg_set_conf_uint,
1499 	},
1500 	{
1501 		.name = "throttle.io_service_bytes",
1502 		.private = (unsigned long)&blkcg_policy_throtl,
1503 		.seq_show = blkg_print_stat_bytes,
1504 	},
1505 	{
1506 		.name = "throttle.io_serviced",
1507 		.private = (unsigned long)&blkcg_policy_throtl,
1508 		.seq_show = blkg_print_stat_ios,
1509 	},
1510 	{ }	/* terminate */
1511 };
1512 
1513 static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1514 			 int off)
1515 {
1516 	struct throtl_grp *tg = pd_to_tg(pd);
1517 	const char *dname = blkg_dev_name(pd->blkg);
1518 	char bufs[4][21] = { "max", "max", "max", "max" };
1519 	u64 bps_dft;
1520 	unsigned int iops_dft;
1521 	char idle_time[26] = "";
1522 	char latency_time[26] = "";
1523 
1524 	if (!dname)
1525 		return 0;
1526 
1527 	if (off == LIMIT_LOW) {
1528 		bps_dft = 0;
1529 		iops_dft = 0;
1530 	} else {
1531 		bps_dft = U64_MAX;
1532 		iops_dft = UINT_MAX;
1533 	}
1534 
1535 	if (tg->bps_conf[READ][off] == bps_dft &&
1536 	    tg->bps_conf[WRITE][off] == bps_dft &&
1537 	    tg->iops_conf[READ][off] == iops_dft &&
1538 	    tg->iops_conf[WRITE][off] == iops_dft &&
1539 	    (off != LIMIT_LOW ||
1540 	     (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
1541 	      tg->latency_target_conf == DFL_LATENCY_TARGET)))
1542 		return 0;
1543 
1544 	if (tg->bps_conf[READ][off] != U64_MAX)
1545 		snprintf(bufs[0], sizeof(bufs[0]), "%llu",
1546 			tg->bps_conf[READ][off]);
1547 	if (tg->bps_conf[WRITE][off] != U64_MAX)
1548 		snprintf(bufs[1], sizeof(bufs[1]), "%llu",
1549 			tg->bps_conf[WRITE][off]);
1550 	if (tg->iops_conf[READ][off] != UINT_MAX)
1551 		snprintf(bufs[2], sizeof(bufs[2]), "%u",
1552 			tg->iops_conf[READ][off]);
1553 	if (tg->iops_conf[WRITE][off] != UINT_MAX)
1554 		snprintf(bufs[3], sizeof(bufs[3]), "%u",
1555 			tg->iops_conf[WRITE][off]);
1556 	if (off == LIMIT_LOW) {
1557 		if (tg->idletime_threshold_conf == ULONG_MAX)
1558 			strcpy(idle_time, " idle=max");
1559 		else
1560 			snprintf(idle_time, sizeof(idle_time), " idle=%lu",
1561 				tg->idletime_threshold_conf);
1562 
1563 		if (tg->latency_target_conf == ULONG_MAX)
1564 			strcpy(latency_time, " latency=max");
1565 		else
1566 			snprintf(latency_time, sizeof(latency_time),
1567 				" latency=%lu", tg->latency_target_conf);
1568 	}
1569 
1570 	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
1571 		   dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
1572 		   latency_time);
1573 	return 0;
1574 }
1575 
1576 static int tg_print_limit(struct seq_file *sf, void *v)
1577 {
1578 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
1579 			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1580 	return 0;
1581 }
1582 
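/*
 * tg_set_limit() parses writes to the "max" (and, with
 * CONFIG_BLK_DEV_THROTTLING_LOW, "low") interface files.  A line such as
 * "8:16 rbps=1048576 wbps=max riops=1000 wiops=max" updates the per-device
 * limits; the "low" file additionally accepts "idle=" and "latency="
 * tokens, as parsed below.
 */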
1583 static ssize_t tg_set_limit(struct kernfs_open_file *of,
1584 			  char *buf, size_t nbytes, loff_t off)
1585 {
1586 	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1587 	struct blkg_conf_ctx ctx;
1588 	struct throtl_grp *tg;
1589 	u64 v[4];
1590 	unsigned long idle_time;
1591 	unsigned long latency_time;
1592 	int ret;
1593 	int index = of_cft(of)->private;
1594 
1595 	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1596 	if (ret)
1597 		return ret;
1598 
1599 	tg = blkg_to_tg(ctx.blkg);
1600 
1601 	v[0] = tg->bps_conf[READ][index];
1602 	v[1] = tg->bps_conf[WRITE][index];
1603 	v[2] = tg->iops_conf[READ][index];
1604 	v[3] = tg->iops_conf[WRITE][index];
1605 
1606 	idle_time = tg->idletime_threshold_conf;
1607 	latency_time = tg->latency_target_conf;
1608 	while (true) {
1609 		char tok[27];	/* wiops=18446744073709551616 */
1610 		char *p;
1611 		u64 val = U64_MAX;
1612 		int len;
1613 
1614 		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1615 			break;
1616 		if (tok[0] == '\0')
1617 			break;
1618 		ctx.body += len;
1619 
1620 		ret = -EINVAL;
1621 		p = tok;
1622 		strsep(&p, "=");
1623 		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1624 			goto out_finish;
1625 
1626 		ret = -ERANGE;
1627 		if (!val)
1628 			goto out_finish;
1629 
1630 		ret = -EINVAL;
1631 		if (!strcmp(tok, "rbps"))
1632 			v[0] = val;
1633 		else if (!strcmp(tok, "wbps"))
1634 			v[1] = val;
1635 		else if (!strcmp(tok, "riops"))
1636 			v[2] = min_t(u64, val, UINT_MAX);
1637 		else if (!strcmp(tok, "wiops"))
1638 			v[3] = min_t(u64, val, UINT_MAX);
1639 		else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
1640 			idle_time = val;
1641 		else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
1642 			latency_time = val;
1643 		else
1644 			goto out_finish;
1645 	}
1646 
1647 	tg->bps_conf[READ][index] = v[0];
1648 	tg->bps_conf[WRITE][index] = v[1];
1649 	tg->iops_conf[READ][index] = v[2];
1650 	tg->iops_conf[WRITE][index] = v[3];
1651 
1652 	if (index == LIMIT_MAX) {
1653 		tg->bps[READ][index] = v[0];
1654 		tg->bps[WRITE][index] = v[1];
1655 		tg->iops[READ][index] = v[2];
1656 		tg->iops[WRITE][index] = v[3];
1657 	}
1658 	tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1659 		tg->bps_conf[READ][LIMIT_MAX]);
1660 	tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1661 		tg->bps_conf[WRITE][LIMIT_MAX]);
1662 	tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1663 		tg->iops_conf[READ][LIMIT_MAX]);
1664 	tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1665 		tg->iops_conf[WRITE][LIMIT_MAX]);
1666 	tg->idletime_threshold_conf = idle_time;
1667 	tg->latency_target_conf = latency_time;
1668 
1669 	/* force user to configure all settings for low limit  */
1670 	if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1671 	      tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1672 	    tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1673 	    tg->latency_target_conf == DFL_LATENCY_TARGET) {
1674 		tg->bps[READ][LIMIT_LOW] = 0;
1675 		tg->bps[WRITE][LIMIT_LOW] = 0;
1676 		tg->iops[READ][LIMIT_LOW] = 0;
1677 		tg->iops[WRITE][LIMIT_LOW] = 0;
1678 		tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1679 		tg->latency_target = DFL_LATENCY_TARGET;
1680 	} else if (index == LIMIT_LOW) {
1681 		tg->idletime_threshold = tg->idletime_threshold_conf;
1682 		tg->latency_target = tg->latency_target_conf;
1683 	}
1684 
1685 	blk_throtl_update_limit_valid(tg->td);
1686 	if (tg->td->limit_valid[LIMIT_LOW]) {
1687 		if (index == LIMIT_LOW)
1688 			tg->td->limit_index = LIMIT_LOW;
1689 	} else
1690 		tg->td->limit_index = LIMIT_MAX;
1691 	tg_conf_updated(tg, index == LIMIT_LOW &&
1692 		tg->td->limit_valid[LIMIT_LOW]);
1693 	ret = 0;
1694 out_finish:
1695 	blkg_conf_finish(&ctx);
1696 	return ret ?: nbytes;
1697 }
1698 
1699 static struct cftype throtl_files[] = {
1700 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1701 	{
1702 		.name = "low",
1703 		.flags = CFTYPE_NOT_ON_ROOT,
1704 		.seq_show = tg_print_limit,
1705 		.write = tg_set_limit,
1706 		.private = LIMIT_LOW,
1707 	},
1708 #endif
1709 	{
1710 		.name = "max",
1711 		.flags = CFTYPE_NOT_ON_ROOT,
1712 		.seq_show = tg_print_limit,
1713 		.write = tg_set_limit,
1714 		.private = LIMIT_MAX,
1715 	},
1716 	{ }	/* terminate */
1717 };
1718 
1719 static void throtl_shutdown_wq(struct request_queue *q)
1720 {
1721 	struct throtl_data *td = q->td;
1722 
1723 	cancel_work_sync(&td->dispatch_work);
1724 }
1725 
1726 static struct blkcg_policy blkcg_policy_throtl = {
1727 	.dfl_cftypes		= throtl_files,
1728 	.legacy_cftypes		= throtl_legacy_files,
1729 
1730 	.pd_alloc_fn		= throtl_pd_alloc,
1731 	.pd_init_fn		= throtl_pd_init,
1732 	.pd_online_fn		= throtl_pd_online,
1733 	.pd_offline_fn		= throtl_pd_offline,
1734 	.pd_free_fn		= throtl_pd_free,
1735 };
1736 
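/*
 * Last time @tg exceeded its low limit, considering only the directions that
 * actually have a low limit configured.  A direction without a low limit
 * defaults to "now" (jiffies), so the min() below ends up reporting the
 * configured direction's last overflow time.
 */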
1737 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1738 {
1739 	unsigned long rtime = jiffies, wtime = jiffies;
1740 
1741 	if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1742 		rtime = tg->last_low_overflow_time[READ];
1743 	if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1744 		wtime = tg->last_low_overflow_time[WRITE];
1745 	return min(rtime, wtime);
1746 }
1747 
1748 /* tg should not be an intermediate node */
1749 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1750 {
1751 	struct throtl_service_queue *parent_sq;
1752 	struct throtl_grp *parent = tg;
1753 	unsigned long ret = __tg_last_low_overflow_time(tg);
1754 
1755 	while (true) {
1756 		parent_sq = parent->service_queue.parent_sq;
1757 		parent = sq_to_tg(parent_sq);
1758 		if (!parent)
1759 			break;
1760 
1761 		/*
1762 		 * The parent doesn't have a low limit, so it is always considered
1763 		 * to have reached it.  Its overflow time is useless for its children
1764 		 */
1765 		if (!parent->bps[READ][LIMIT_LOW] &&
1766 		    !parent->iops[READ][LIMIT_LOW] &&
1767 		    !parent->bps[WRITE][LIMIT_LOW] &&
1768 		    !parent->iops[WRITE][LIMIT_LOW])
1769 			continue;
1770 		if (time_after(__tg_last_low_overflow_time(parent), ret))
1771 			ret = __tg_last_low_overflow_time(parent);
1772 	}
1773 	return ret;
1774 }
1775 
1776 static bool throtl_tg_is_idle(struct throtl_grp *tg)
1777 {
1778 	/*
1779 	 * cgroup is idle if:
1780 	 * - the last idle period is longer than 4 times the idletime threshold,
1781 	 *   capped at a fixed value (in case the user configured it too big)
1782 	 * - average think time is above the idletime threshold
1783 	 * - IO latency is mostly below the latency target
1784 	 */
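	/*
	 * Timestamps below are ktime in nanoseconds shifted right by 10, i.e.
	 * roughly microseconds; MAX_IDLE_TIME caps the single-idle window at
	 * about 5 seconds.  The last clause reads: a latency target is set,
	 * bios have been sampled, and fewer than 20% of them exceeded the
	 * target (bad_bio_cnt * 5 < bio_cnt).
	 */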
1785 	unsigned long time;
1786 	bool ret;
1787 
1788 	time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1789 	ret = tg->latency_target == DFL_LATENCY_TARGET ||
1790 	      tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1791 	      (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1792 	      tg->avg_idletime > tg->idletime_threshold ||
1793 	      (tg->latency_target && tg->bio_cnt &&
1794 		tg->bad_bio_cnt * 5 < tg->bio_cnt);
1795 	throtl_log(&tg->service_queue,
1796 		"avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
1797 		tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1798 		tg->bio_cnt, ret, tg->td->scale);
1799 	return ret;
1800 }
1801 
1802 static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1803 {
1804 	struct throtl_service_queue *sq = &tg->service_queue;
1805 	bool read_limit, write_limit;
1806 
1807 	/*
1808 	 * If the cgroup has reached its low limit (a low limit of 0 is always
1809 	 * considered reached), it's ok to upgrade to the next limit.
1810 	 */
1811 	read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1812 	write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1813 	if (!read_limit && !write_limit)
1814 		return true;
1815 	if (read_limit && sq->nr_queued[READ] &&
1816 	    (!write_limit || sq->nr_queued[WRITE]))
1817 		return true;
1818 	if (write_limit && sq->nr_queued[WRITE] &&
1819 	    (!read_limit || sq->nr_queued[READ]))
1820 		return true;
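	/*
	 * The two checks above mean every direction that has a low limit also
	 * has bios queued, i.e. the group is being held back by its low limit
	 * in all configured directions.
	 */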
1821 
1822 	if (time_after_eq(jiffies,
1823 		tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1824 	    throtl_tg_is_idle(tg))
1825 		return true;
1826 	return false;
1827 }
1828 
1829 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1830 {
1831 	while (true) {
1832 		if (throtl_tg_can_upgrade(tg))
1833 			return true;
1834 		tg = sq_to_tg(tg->service_queue.parent_sq);
1835 		if (!tg || !tg_to_blkg(tg)->parent)
1836 			return false;
1837 	}
1838 	return false;
1839 }
1840 
1841 static bool throtl_can_upgrade(struct throtl_data *td,
1842 	struct throtl_grp *this_tg)
1843 {
1844 	struct cgroup_subsys_state *pos_css;
1845 	struct blkcg_gq *blkg;
1846 
1847 	if (td->limit_index != LIMIT_LOW)
1848 		return false;
1849 
1850 	if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
1851 		return false;
1852 
1853 	rcu_read_lock();
1854 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1855 		struct throtl_grp *tg = blkg_to_tg(blkg);
1856 
1857 		if (tg == this_tg)
1858 			continue;
1859 		if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1860 			continue;
1861 		if (!throtl_hierarchy_can_upgrade(tg)) {
1862 			rcu_read_unlock();
1863 			return false;
1864 		}
1865 	}
1866 	rcu_read_unlock();
1867 	return true;
1868 }
1869 
1870 static void throtl_upgrade_check(struct throtl_grp *tg)
1871 {
1872 	unsigned long now = jiffies;
1873 
1874 	if (tg->td->limit_index != LIMIT_LOW)
1875 		return;
1876 
1877 	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1878 		return;
1879 
1880 	tg->last_check_time = now;
1881 
1882 	if (!time_after_eq(now,
1883 	     __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1884 		return;
1885 
1886 	if (throtl_can_upgrade(tg->td, NULL))
1887 		throtl_upgrade_state(tg->td);
1888 }
1889 
1890 static void throtl_upgrade_state(struct throtl_data *td)
1891 {
1892 	struct cgroup_subsys_state *pos_css;
1893 	struct blkcg_gq *blkg;
1894 
1895 	throtl_log(&td->service_queue, "upgrade to max");
1896 	td->limit_index = LIMIT_MAX;
1897 	td->low_upgrade_time = jiffies;
1898 	td->scale = 0;
1899 	rcu_read_lock();
1900 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1901 		struct throtl_grp *tg = blkg_to_tg(blkg);
1902 		struct throtl_service_queue *sq = &tg->service_queue;
1903 
1904 		tg->disptime = jiffies - 1;
1905 		throtl_select_dispatch(sq);
1906 		throtl_schedule_next_dispatch(sq, false);
1907 	}
1908 	rcu_read_unlock();
1909 	throtl_select_dispatch(&td->service_queue);
1910 	throtl_schedule_next_dispatch(&td->service_queue, false);
1911 	queue_work(kthrotld_workqueue, &td->dispatch_work);
1912 }
1913 
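/*
 * Back off gradually rather than dropping straight to the new limit.
 * td->scale is grown elsewhere in this file while the queue stays at
 * LIMIT_MAX and scales the effective low limits up; halving it here (and
 * moving low_upgrade_time back to match) shrinks that allowance first, and
 * limit_index only changes once scale has reached zero.
 */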
1914 static void throtl_downgrade_state(struct throtl_data *td, int new)
1915 {
1916 	td->scale /= 2;
1917 
1918 	throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
1919 	if (td->scale) {
1920 		td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
1921 		return;
1922 	}
1923 
1924 	td->limit_index = new;
1925 	td->low_downgrade_time = jiffies;
1926 }
1927 
1928 static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
1929 {
1930 	struct throtl_data *td = tg->td;
1931 	unsigned long now = jiffies;
1932 
1933 	/*
1934 	 * If cgroup is below low limit, consider downgrade and throttle other
1935 	 * cgroups
1936 	 */
1937 	if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
1938 	    time_after_eq(now, tg_last_low_overflow_time(tg) +
1939 					td->throtl_slice) &&
1940 	    (!throtl_tg_is_idle(tg) ||
1941 	     !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
1942 		return true;
1943 	return false;
1944 }
1945 
1946 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
1947 {
1948 	while (true) {
1949 		if (!throtl_tg_can_downgrade(tg))
1950 			return false;
1951 		tg = sq_to_tg(tg->service_queue.parent_sq);
1952 		if (!tg || !tg_to_blkg(tg)->parent)
1953 			break;
1954 	}
1955 	return true;
1956 }
1957 
1958 static void throtl_downgrade_check(struct throtl_grp *tg)
1959 {
1960 	uint64_t bps;
1961 	unsigned int iops;
1962 	unsigned long elapsed_time;
1963 	unsigned long now = jiffies;
1964 
1965 	if (tg->td->limit_index != LIMIT_MAX ||
1966 	    !tg->td->limit_valid[LIMIT_LOW])
1967 		return;
1968 	if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1969 		return;
1970 	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1971 		return;
1972 
1973 	elapsed_time = now - tg->last_check_time;
1974 	tg->last_check_time = now;
1975 
1976 	if (time_before(now, tg_last_low_overflow_time(tg) +
1977 			tg->td->throtl_slice))
1978 		return;
1979 
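	/*
	 * Convert the bytes/IOs dispatched during the last check interval into
	 * a rate.  For example, with HZ=1000, elapsed_time=100 jiffies (100ms)
	 * and last_bytes_disp[READ]=1048576 (1MiB):
	 * bps = 1048576 * 1000 / 100 = 10485760, i.e. 10MiB/s, which is then
	 * compared against the configured low limit.
	 */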
1980 	if (tg->bps[READ][LIMIT_LOW]) {
1981 		bps = tg->last_bytes_disp[READ] * HZ;
1982 		do_div(bps, elapsed_time);
1983 		if (bps >= tg->bps[READ][LIMIT_LOW])
1984 			tg->last_low_overflow_time[READ] = now;
1985 	}
1986 
1987 	if (tg->bps[WRITE][LIMIT_LOW]) {
1988 		bps = tg->last_bytes_disp[WRITE] * HZ;
1989 		do_div(bps, elapsed_time);
1990 		if (bps >= tg->bps[WRITE][LIMIT_LOW])
1991 			tg->last_low_overflow_time[WRITE] = now;
1992 	}
1993 
1994 	if (tg->iops[READ][LIMIT_LOW]) {
1995 		iops = tg->last_io_disp[READ] * HZ / elapsed_time;
1996 		if (iops >= tg->iops[READ][LIMIT_LOW])
1997 			tg->last_low_overflow_time[READ] = now;
1998 	}
1999 
2000 	if (tg->iops[WRITE][LIMIT_LOW]) {
2001 		iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
2002 		if (iops >= tg->iops[WRITE][LIMIT_LOW])
2003 			tg->last_low_overflow_time[WRITE] = now;
2004 	}
2005 
2006 	/*
2007 	 * If cgroup is below low limit, consider downgrade and throttle other
2008 	 * cgroups
2009 	 */
2010 	if (throtl_hierarchy_can_downgrade(tg))
2011 		throtl_downgrade_state(tg->td, LIMIT_LOW);
2012 
2013 	tg->last_bytes_disp[READ] = 0;
2014 	tg->last_bytes_disp[WRITE] = 0;
2015 	tg->last_io_disp[READ] = 0;
2016 	tg->last_io_disp[WRITE] = 0;
2017 }
2018 
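/*
 * Track how long the cgroup typically stays idle between completing one bio
 * and issuing the next, as an exponentially weighted moving average of
 * 7/8 old value + 1/8 new sample.  For example, avg_idletime=800 and a new
 * gap of 1600 (both roughly microseconds) give (800 * 7 + 1600) >> 3 = 900.
 */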
2019 static void blk_throtl_update_idletime(struct throtl_grp *tg)
2020 {
2021 	unsigned long now = ktime_get_ns() >> 10;
2022 	unsigned long last_finish_time = tg->last_finish_time;
2023 
2024 	if (now <= last_finish_time || last_finish_time == 0 ||
2025 	    last_finish_time == tg->checked_last_finish_time)
2026 		return;
2027 
2028 	tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
2029 	tg->checked_last_finish_time = last_finish_time;
2030 }
2031 
2032 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2033 static void throtl_update_latency_buckets(struct throtl_data *td)
2034 {
2035 	struct avg_latency_bucket avg_latency[LATENCY_BUCKET_SIZE];
2036 	int i, cpu;
2037 	unsigned long last_latency = 0;
2038 	unsigned long latency;
2039 
2040 	if (!blk_queue_nonrot(td->queue))
2041 		return;
2042 	if (time_before(jiffies, td->last_calculate_time + HZ))
2043 		return;
2044 	td->last_calculate_time = jiffies;
2045 
2046 	memset(avg_latency, 0, sizeof(avg_latency));
2047 	for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2048 		struct latency_bucket *tmp = &td->tmp_buckets[i];
2049 
2050 		for_each_possible_cpu(cpu) {
2051 			struct latency_bucket *bucket;
2052 
2053 			/* this isn't race free, but ok in practice */
2054 			bucket = per_cpu_ptr(td->latency_buckets, cpu);
2055 			tmp->total_latency += bucket[i].total_latency;
2056 			tmp->samples += bucket[i].samples;
2057 			bucket[i].total_latency = 0;
2058 			bucket[i].samples = 0;
2059 		}
2060 
2061 		if (tmp->samples >= 32) {
2062 			int samples = tmp->samples;
2063 
2064 			latency = tmp->total_latency;
2065 
2066 			tmp->total_latency = 0;
2067 			tmp->samples = 0;
2068 			latency /= samples;
2069 			if (latency == 0)
2070 				continue;
2071 			avg_latency[i].latency = latency;
2072 		}
2073 	}
2074 
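	/*
	 * Fold the fresh averages into the long-term per-bucket estimate with
	 * the same 7/8 old + 1/8 new weighting used for idle time, and keep
	 * the estimates non-decreasing from one bucket to the next via
	 * last_latency, so a bucket (covering larger requests) never reports a
	 * lower expected latency than the bucket before it.
	 */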
2075 	for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2076 		if (!avg_latency[i].latency) {
2077 			if (td->avg_buckets[i].latency < last_latency)
2078 				td->avg_buckets[i].latency = last_latency;
2079 			continue;
2080 		}
2081 
2082 		if (!td->avg_buckets[i].valid)
2083 			latency = avg_latency[i].latency;
2084 		else
2085 			latency = (td->avg_buckets[i].latency * 7 +
2086 				avg_latency[i].latency) >> 3;
2087 
2088 		td->avg_buckets[i].latency = max(latency, last_latency);
2089 		td->avg_buckets[i].valid = true;
2090 		last_latency = td->avg_buckets[i].latency;
2091 	}
2092 
2093 	for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2094 		throtl_log(&td->service_queue,
2095 			"Latency bucket %d: latency=%ld, valid=%d", i,
2096 			td->avg_buckets[i].latency, td->avg_buckets[i].valid);
2097 }
2098 #else
2099 static inline void throtl_update_latency_buckets(struct throtl_data *td)
2100 {
2101 }
2102 #endif
2103 
2104 static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
2105 {
2106 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2107 	int ret;
2108 
2109 	ret = bio_associate_current(bio);
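	/*
	 * 0 means the bio was associated with the current task's io_context
	 * and blkcg; -EBUSY means it was already associated.  Either way the
	 * bio can be used for low-limit latency tracking.
	 */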
2110 	if (ret == 0 || ret == -EBUSY)
2111 		bio->bi_cg_private = tg;
2112 	blk_stat_set_issue(&bio->bi_issue_stat, bio_sectors(bio));
2113 #else
2114 	bio_associate_current(bio);
2115 #endif
2116 }
2117 
2118 bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
2119 		    struct bio *bio)
2120 {
2121 	struct throtl_qnode *qn = NULL;
2122 	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
2123 	struct throtl_service_queue *sq;
2124 	bool rw = bio_data_dir(bio);
2125 	bool throttled = false;
2126 	struct throtl_data *td = tg->td;
2127 
2128 	WARN_ON_ONCE(!rcu_read_lock_held());
2129 
2130 	/* see throtl_charge_bio() */
2131 	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
2132 		goto out;
2133 
2134 	spin_lock_irq(q->queue_lock);
2135 
2136 	throtl_update_latency_buckets(td);
2137 
2138 	if (unlikely(blk_queue_bypass(q)))
2139 		goto out_unlock;
2140 
2141 	blk_throtl_assoc_bio(tg, bio);
2142 	blk_throtl_update_idletime(tg);
2143 
2144 	sq = &tg->service_queue;
2145 
2146 again:
2147 	while (true) {
2148 		if (tg->last_low_overflow_time[rw] == 0)
2149 			tg->last_low_overflow_time[rw] = jiffies;
2150 		throtl_downgrade_check(tg);
2151 		throtl_upgrade_check(tg);
2152 		/* throtl is FIFO - if bios are already queued, queue this one too */
2153 		if (sq->nr_queued[rw])
2154 			break;
2155 
2156 		/* if above limits, break to queue */
2157 		if (!tg_may_dispatch(tg, bio, NULL)) {
2158 			tg->last_low_overflow_time[rw] = jiffies;
2159 			if (throtl_can_upgrade(td, tg)) {
2160 				throtl_upgrade_state(td);
2161 				goto again;
2162 			}
2163 			break;
2164 		}
2165 
2166 		/* within limits, let's charge and dispatch directly */
2167 		throtl_charge_bio(tg, bio);
2168 
2169 		/*
2170 		 * We need to trim slice even when bios are not being queued
2171 		 * otherwise it might happen that a bio is not queued for
2172 		 * a long time and slice keeps on extending and trim is not
2173 		 * called for a long time. Now if limits are reduced suddenly
2174 		 * we take into account all the IO dispatched so far at new
2175 		 * low rate and newly queued IO gets a really long dispatch
2176 		 * time.
2177 		 *
2178 		 * So keep on trimming slice even if bio is not queued.
2179 		 */
2180 		throtl_trim_slice(tg, rw);
2181 
2182 		/*
2183 		 * @bio passed through this layer without being throttled.
2184 		 * Climb up the ladder.  If we're already at the top, it
2185 		 * can be executed directly.
2186 		 */
2187 		qn = &tg->qnode_on_parent[rw];
2188 		sq = sq->parent_sq;
2189 		tg = sq_to_tg(sq);
2190 		if (!tg)
2191 			goto out_unlock;
2192 	}
2193 
2194 	/* out-of-limit, queue to @tg */
2195 	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2196 		   rw == READ ? 'R' : 'W',
2197 		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
2198 		   tg_bps_limit(tg, rw),
2199 		   tg->io_disp[rw], tg_iops_limit(tg, rw),
2200 		   sq->nr_queued[READ], sq->nr_queued[WRITE]);
2201 
2202 	tg->last_low_overflow_time[rw] = jiffies;
2203 
2204 	td->nr_queued[rw]++;
2205 	throtl_add_bio_tg(bio, qn, tg);
2206 	throttled = true;
2207 
2208 	/*
2209 	 * Update @tg's dispatch time and force schedule dispatch if @tg
2210 	 * was empty before @bio.  The forced scheduling isn't likely to
2211 	 * cause undue delay as @bio is likely to be dispatched directly if
2212 	 * its @tg's disptime is not in the future.
2213 	 */
2214 	if (tg->flags & THROTL_TG_WAS_EMPTY) {
2215 		tg_update_disptime(tg);
2216 		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
2217 	}
2218 
2219 out_unlock:
2220 	spin_unlock_irq(q->queue_lock);
2221 out:
2222 	/*
2223 	 * As multiple blk-throtls may stack in the same issue path, we
2224 	 * don't want bios to leave with the flag set.  Clear the flag if
2225 	 * being issued.
2226 	 */
2227 	if (!throttled)
2228 		bio_clear_flag(bio, BIO_THROTTLED);
2229 
2230 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2231 	if (throttled || !td->track_bio_latency)
2232 		bio->bi_issue_stat.stat |= SKIP_LATENCY;
2233 #endif
2234 	return throttled;
2235 }
2236 
2237 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2238 static void throtl_track_latency(struct throtl_data *td, sector_t size,
2239 	int op, unsigned long time)
2240 {
2241 	struct latency_bucket *latency;
2242 	int index;
2243 
2244 	if (!td || td->limit_index != LIMIT_LOW || op != REQ_OP_READ ||
2245 	    !blk_queue_nonrot(td->queue))
2246 		return;
2247 
2248 	index = request_bucket_index(size);
2249 
2250 	latency = get_cpu_ptr(td->latency_buckets);
2251 	latency[index].total_latency += time;
2252 	latency[index].samples++;
2253 	put_cpu_ptr(td->latency_buckets);
2254 }
2255 
2256 void blk_throtl_stat_add(struct request *rq, u64 time_ns)
2257 {
2258 	struct request_queue *q = rq->q;
2259 	struct throtl_data *td = q->td;
2260 
2261 	throtl_track_latency(td, blk_stat_size(&rq->issue_stat),
2262 		req_op(rq), time_ns >> 10);
2263 }
2264 
2265 void blk_throtl_bio_endio(struct bio *bio)
2266 {
2267 	struct throtl_grp *tg;
2268 	u64 finish_time_ns;
2269 	unsigned long finish_time;
2270 	unsigned long start_time;
2271 	unsigned long lat;
2272 
2273 	tg = bio->bi_cg_private;
2274 	if (!tg)
2275 		return;
2276 	bio->bi_cg_private = NULL;
2277 
2278 	finish_time_ns = ktime_get_ns();
2279 	tg->last_finish_time = finish_time_ns >> 10;
2280 
2281 	start_time = blk_stat_time(&bio->bi_issue_stat) >> 10;
2282 	finish_time = __blk_stat_time(finish_time_ns) >> 10;
2283 	if (!start_time || finish_time <= start_time)
2284 		return;
2285 
2286 	lat = finish_time - start_time;
2287 	/* this is only for bio-based drivers */
2288 	if (!(bio->bi_issue_stat.stat & SKIP_LATENCY))
2289 		throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
2290 			bio_op(bio), lat);
2291 
2292 	if (tg->latency_target && lat >= tg->td->filtered_latency) {
2293 		int bucket;
2294 		unsigned int threshold;
2295 
2296 		bucket = request_bucket_index(
2297 			blk_stat_size(&bio->bi_issue_stat));
2298 		threshold = tg->td->avg_buckets[bucket].latency +
2299 			tg->latency_target;
2300 		if (lat > threshold)
2301 			tg->bad_bio_cnt++;
2302 		/*
2303 		 * Not race free: the counts can be slightly wrong, which means
2304 		 * cgroups may be throttled based on an inaccurate count
2305 		 */
2306 		tg->bio_cnt++;
2307 	}
2308 
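	/*
	 * Periodically (once per throtl_slice, or after 1024 bios) halve both
	 * counters so the bad/total ratio used by throtl_tg_is_idle() decays
	 * and reflects recent IO rather than the whole history.
	 */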
2309 	if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2310 		tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2311 		tg->bio_cnt /= 2;
2312 		tg->bad_bio_cnt /= 2;
2313 	}
2314 }
2315 #endif
2316 
2317 /*
2318  * Dispatch all bios from all children tg's queued on @parent_sq.  On
2319  * return, @parent_sq is guaranteed to not have any active children tg's
2320  * and all bios from previously active tg's are on @parent_sq->bio_lists[].
2321  */
2322 static void tg_drain_bios(struct throtl_service_queue *parent_sq)
2323 {
2324 	struct throtl_grp *tg;
2325 
2326 	while ((tg = throtl_rb_first(parent_sq))) {
2327 		struct throtl_service_queue *sq = &tg->service_queue;
2328 		struct bio *bio;
2329 
2330 		throtl_dequeue_tg(tg);
2331 
2332 		while ((bio = throtl_peek_queued(&sq->queued[READ])))
2333 			tg_dispatch_one_bio(tg, bio_data_dir(bio));
2334 		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
2335 			tg_dispatch_one_bio(tg, bio_data_dir(bio));
2336 	}
2337 }
2338 
2339 /**
2340  * blk_throtl_drain - drain throttled bios
2341  * @q: request_queue to drain throttled bios for
2342  *
2343  * Dispatch all currently throttled bios on @q through ->make_request_fn().
2344  */
2345 void blk_throtl_drain(struct request_queue *q)
2346 	__releases(q->queue_lock) __acquires(q->queue_lock)
2347 {
2348 	struct throtl_data *td = q->td;
2349 	struct blkcg_gq *blkg;
2350 	struct cgroup_subsys_state *pos_css;
2351 	struct bio *bio;
2352 	int rw;
2353 
2354 	queue_lockdep_assert_held(q);
2355 	rcu_read_lock();
2356 
2357 	/*
2358 	 * Drain each tg while doing post-order walk on the blkg tree, so
2359 	 * that all bios are propagated to td->service_queue.  It'd be
2360 	 * better to walk service_queue tree directly but blkg walk is
2361 	 * easier.
2362 	 */
2363 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
2364 		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
2365 
2366 	/* finally, transfer bios from top-level tg's into the td */
2367 	tg_drain_bios(&td->service_queue);
2368 
2369 	rcu_read_unlock();
2370 	spin_unlock_irq(q->queue_lock);
2371 
2372 	/* all bios now should be in td->service_queue, issue them */
2373 	for (rw = READ; rw <= WRITE; rw++)
2374 		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
2375 						NULL)))
2376 			generic_make_request(bio);
2377 
2378 	spin_lock_irq(q->queue_lock);
2379 }
2380 
2381 int blk_throtl_init(struct request_queue *q)
2382 {
2383 	struct throtl_data *td;
2384 	int ret;
2385 
2386 	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
2387 	if (!td)
2388 		return -ENOMEM;
2389 	td->latency_buckets = __alloc_percpu(sizeof(struct latency_bucket) *
2390 		LATENCY_BUCKET_SIZE, __alignof__(u64));
2391 	if (!td->latency_buckets) {
2392 		kfree(td);
2393 		return -ENOMEM;
2394 	}
2395 
2396 	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
2397 	throtl_service_queue_init(&td->service_queue);
2398 
2399 	q->td = td;
2400 	td->queue = q;
2401 
2402 	td->limit_valid[LIMIT_MAX] = true;
2403 	td->limit_index = LIMIT_MAX;
2404 	td->low_upgrade_time = jiffies;
2405 	td->low_downgrade_time = jiffies;
2406 
2407 	/* activate policy */
2408 	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
2409 	if (ret) {
2410 		free_percpu(td->latency_buckets);
2411 		kfree(td);
2412 	}
2413 	return ret;
2414 }
2415 
2416 void blk_throtl_exit(struct request_queue *q)
2417 {
2418 	BUG_ON(!q->td);
2419 	throtl_shutdown_wq(q);
2420 	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
2421 	free_percpu(q->td->latency_buckets);
2422 	kfree(q->td);
2423 }
2424 
2425 void blk_throtl_register_queue(struct request_queue *q)
2426 {
2427 	struct throtl_data *td;
2428 	int i;
2429 
2430 	td = q->td;
2431 	BUG_ON(!td);
2432 
2433 	if (blk_queue_nonrot(q)) {
2434 		td->throtl_slice = DFL_THROTL_SLICE_SSD;
2435 		td->filtered_latency = LATENCY_FILTERED_SSD;
2436 	} else {
2437 		td->throtl_slice = DFL_THROTL_SLICE_HD;
2438 		td->filtered_latency = LATENCY_FILTERED_HD;
2439 		for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2440 			td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY;
2441 	}
2442 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2443 	/* without low limit support, keep the previous default slice */
2444 	td->throtl_slice = DFL_THROTL_SLICE_HD;
2445 #endif
2446 
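	/*
	 * With neither mq_ops nor request_fn the queue is bio based, and bio
	 * completion latency is sampled in blk_throtl_bio_endio().  For
	 * request based queues latency comes from blk-stat instead (see
	 * blk_throtl_stat_add()), so enable stat accounting for them.
	 */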
2447 	td->track_bio_latency = !q->mq_ops && !q->request_fn;
2448 	if (!td->track_bio_latency)
2449 		blk_stat_enable_accounting(q);
2450 }
2451 
2452 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
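/*
 * Read/write handlers for the per-queue sysfs knob exposing td->throtl_slice
 * in milliseconds.  Stores reject 0 and anything above MAX_THROTL_SLICE
 * (one second).
 */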
2453 ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
2454 {
2455 	if (!q->td)
2456 		return -EINVAL;
2457 	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
2458 }
2459 
2460 ssize_t blk_throtl_sample_time_store(struct request_queue *q,
2461 	const char *page, size_t count)
2462 {
2463 	unsigned long v;
2464 	unsigned long t;
2465 
2466 	if (!q->td)
2467 		return -EINVAL;
2468 	if (kstrtoul(page, 10, &v))
2469 		return -EINVAL;
2470 	t = msecs_to_jiffies(v);
2471 	if (t == 0 || t > MAX_THROTL_SLICE)
2472 		return -EINVAL;
2473 	q->td->throtl_slice = t;
2474 	return count;
2475 }
2476 #endif
2477 
2478 static int __init throtl_init(void)
2479 {
2480 	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
2481 	if (!kthrotld_workqueue)
2482 		panic("Failed to create kthrotld\n");
2483 
2484 	return blkcg_policy_register(&blkcg_policy_throtl);
2485 }
2486 
2487 module_init(throtl_init);
2488