xref: /openbmc/linux/block/blk-iolatency.c (revision 07c7c6bf)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Block rq-qos base io controller
4  *
5  * This works similarly to wbt, with a few exceptions:
6  *
7  * - It's bio based, so the latency covers the whole block layer in addition to
8  *   the actual io.
9  * - We will throttle all IO that comes in here if we need to.
10  * - We use the mean latency over the 100ms window.  This is because writes can
11  *   be particularly fast, which could give us a false sense of the impact of
12  *   other workloads on our protected workload.
13  * - By default there's no throttling: we set the queue_depth to UINT_MAX so
14  *   that we can have as many outstanding bio's as we're allowed to.  Only at
15  *   throttle time do we pay attention to the actual queue depth.
16  *
17  * The hierarchy works like the cpu controller does: we track the latency at
18  * every configured node, and each configured node has its own independent
19  * queue depth.  This means that we only care about our latency targets at the
20  * peer level.  A group at the bottom of the hierarchy will not affect a group
21  * at the end of some other path if we're only configured at the leaf level.
22  *
23  * Consider the following
24  *
25  *                   root blkg
26  *             /                     \
27  *        fast (target=5ms)     slow (target=10ms)
28  *         /     \                  /        \
29  *       a        b          normal(15ms)   unloved
30  *
31  * "a" and "b" have no target, but their combined io under "fast" cannot exceed
32  * an average latency of 5ms.  If it does then we will throttle the "slow"
33  * group.  In the case of "normal", if it exceeds its 15ms target, we will
34  * throttle "unloved", but nobody else.
35  *
36  * In this example "fast", "slow", and "normal" will be the only groups actually
37  * accounting their io latencies.  We have to walk up the hierarchy to the root
38  * on every submit and complete so we can do the appropriate stat recording and
39  * adjust our own queue depth if needed.
40  *
41  * There are 2 ways we throttle IO.
42  *
43  * 1) Queue depth throttling.  As we throttle down we will adjust the maximum
44  * number of IO's we're allowed to have in flight.  This starts at UINT_MAX down
45  * to 1.  If the group is only ever submitting IO for itself then this is the
46  * only way we throttle.
47  *
48  * 2) Induced delay throttling.  This is for the case that a group is generating
49  * IO that has to be issued by the root cg to avoid priority inversion. So think
50  * REQ_META or REQ_SWAP.  If we are already at qd == 1 and we're getting a lot
51  * of work done for us on behalf of the root cg and are being asked to scale
52  * down more, then we induce a latency at userspace return.  We accumulate the
53  * total amount of time we need to be punished by doing
54  *
55  * total_time += min_lat_nsec - actual_io_completion
56  *
57  * and then at throttle time will do
58  *
59  * throttle_time = min(total_time, NSEC_PER_SEC)
60  *
61  * This induced delay will throttle back the activity that is generating the
62  * root cg issued io's, whether that's some metadata-intensive operation or the
63  * group is using so much memory that it is pushing us into swap.
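 *
 * For example (illustrative numbers only): with a 5ms target, a root-issued
 * bio done on our behalf that completes in 2ms adds 3ms of punishment, and at
 * return to userspace the task sleeps for at most one second of accumulated
 * punishment.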
64  *
65  * Copyright (C) 2018 Josef Bacik
66  */
67 #include <linux/kernel.h>
68 #include <linux/blk_types.h>
69 #include <linux/backing-dev.h>
70 #include <linux/module.h>
71 #include <linux/timer.h>
72 #include <linux/memcontrol.h>
73 #include <linux/sched/loadavg.h>
74 #include <linux/sched/signal.h>
75 #include <trace/events/block.h>
76 #include <linux/blk-mq.h>
77 #include "blk-rq-qos.h"
78 #include "blk-stat.h"
79 #include "blk.h"
80 
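/*
 * The "unthrottled" value of the scale cookie: groups scale down by pulling
 * the shared cookie below this value and scale back up towards it.
 */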
81 #define DEFAULT_SCALE_COOKIE 1000000U
82 
83 static struct blkcg_policy blkcg_policy_iolatency;
84 struct iolatency_grp;
85 
86 struct blk_iolatency {
87 	struct rq_qos rqos;
88 	struct timer_list timer;
89 	atomic_t enabled;
90 };
91 
92 static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
93 {
94 	return container_of(rqos, struct blk_iolatency, rqos);
95 }
96 
97 static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
98 {
99 	return atomic_read(&blkiolat->enabled) > 0;
100 }
101 
102 struct child_latency_info {
103 	spinlock_t lock;
104 
105 	/* Last time we adjusted the scale of everybody. */
106 	u64 last_scale_event;
107 
108 	/* The latency that we missed. */
109 	u64 scale_lat;
110 
111 	/* Total io's from all of our children for the last summation. */
112 	u64 nr_samples;
113 
114 	/* The group that last changed the latency numbers. */
115 	struct iolatency_grp *scale_grp;
116 
117 	/* Cookie to tell if we need to scale up or down. */
118 	atomic_t scale_cookie;
119 };
120 
121 struct percentile_stats {
122 	u64 total;
123 	u64 missed;
124 };
125 
126 struct latency_stat {
127 	union {
128 		struct percentile_stats ps;
129 		struct blk_rq_stat rqs;
130 	};
131 };
132 
133 struct iolatency_grp {
134 	struct blkg_policy_data pd;
135 	struct latency_stat __percpu *stats;
136 	struct latency_stat cur_stat;
137 	struct blk_iolatency *blkiolat;
138 	struct rq_depth rq_depth;
139 	struct rq_wait rq_wait;
140 	atomic64_t window_start;
141 	atomic_t scale_cookie;
142 	u64 min_lat_nsec;
143 	u64 cur_win_nsec;
144 
145 	/* total running average of our io latency. */
146 	u64 lat_avg;
147 
148 	/* Our current number of IO's for the last summation. */
149 	u64 nr_samples;
150 
151 	bool ssd;
152 	struct child_latency_info child_lat;
153 };
154 
155 #define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
156 #define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
157 /*
158  * These are the constants used to fake the fixed-point moving average
159  * calculation just like load average.  The call to calc_load() folds
160  * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg.  The sampling
161  * window size is bucketed to try to approximately calculate average
162  * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
163  * elapse immediately.  Note, windows only elapse with IO activity.  Idle
164  * periods extend the most recent window.
165  */
166 #define BLKIOLATENCY_NR_EXP_FACTORS 5
167 #define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
168 				      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
169 static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
170 	2045, // exp(1/600) - 600 samples
171 	2039, // exp(1/240) - 240 samples
172 	2031, // exp(1/120) - 120 samples
173 	2023, // exp(1/80)  - 80 samples
174 	2014, // exp(1/60)  - 60 samples
175 };
176 
177 static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
178 {
179 	return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
180 }
181 
182 static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
183 {
184 	return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
185 }
186 
187 static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
188 {
189 	return pd_to_blkg(&iolat->pd);
190 }
191 
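/*
 * Stats come in two flavours: for SSDs we only count total IOs and how many
 * missed the target (a simple percentile), for rotational devices we keep a
 * full blk_rq_stat so we can look at the mean latency.
 */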
192 static inline void latency_stat_init(struct iolatency_grp *iolat,
193 				     struct latency_stat *stat)
194 {
195 	if (iolat->ssd) {
196 		stat->ps.total = 0;
197 		stat->ps.missed = 0;
198 	} else
199 		blk_rq_stat_init(&stat->rqs);
200 }
201 
202 static inline void latency_stat_sum(struct iolatency_grp *iolat,
203 				    struct latency_stat *sum,
204 				    struct latency_stat *stat)
205 {
206 	if (iolat->ssd) {
207 		sum->ps.total += stat->ps.total;
208 		sum->ps.missed += stat->ps.missed;
209 	} else
210 		blk_rq_stat_sum(&sum->rqs, &stat->rqs);
211 }
212 
213 static inline void latency_stat_record_time(struct iolatency_grp *iolat,
214 					    u64 req_time)
215 {
216 	struct latency_stat *stat = get_cpu_ptr(iolat->stats);
217 	if (iolat->ssd) {
218 		if (req_time >= iolat->min_lat_nsec)
219 			stat->ps.missed++;
220 		stat->ps.total++;
221 	} else
222 		blk_rq_stat_add(&stat->rqs, req_time);
223 	put_cpu_ptr(stat);
224 }
225 
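/*
 * A window is considered ok if, for SSDs, fewer than ~10% of the IOs missed
 * the target latency, or, for rotational devices, the mean latency stayed at
 * or below the target.
 */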
226 static inline bool latency_sum_ok(struct iolatency_grp *iolat,
227 				  struct latency_stat *stat)
228 {
229 	if (iolat->ssd) {
230 		u64 thresh = div64_u64(stat->ps.total, 10);
231 		thresh = max(thresh, 1ULL);
232 		return stat->ps.missed < thresh;
233 	}
234 	return stat->rqs.mean <= iolat->min_lat_nsec;
235 }
236 
237 static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
238 				       struct latency_stat *stat)
239 {
240 	if (iolat->ssd)
241 		return stat->ps.total;
242 	return stat->rqs.nr_samples;
243 }
244 
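/*
 * Fold this window's mean latency into the load-average style running
 * average.  Only meaningful for rotational devices; SSDs just return and use
 * the percentile stats instead.
 */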
245 static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
246 					      struct latency_stat *stat)
247 {
248 	int exp_idx;
249 
250 	if (iolat->ssd)
251 		return;
252 
253 	/*
254 	 * calc_load() takes in a number stored in fixed point representation.
255 	 * Because we are using this for IO time in ns, the values stored
256 	 * are significantly larger than the FIXED_1 denominator (2048).
257 	 * Therefore, rounding errors in the calculation are negligible and
258 	 * can be ignored.
259 	 */
260 	exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
261 			div64_u64(iolat->cur_win_nsec,
262 				  BLKIOLATENCY_EXP_BUCKET_SIZE));
263 	iolat->lat_avg = calc_load(iolat->lat_avg,
264 				   iolatency_exp_factors[exp_idx],
265 				   stat->rqs.mean);
266 }
267 
268 static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
269 {
270 	atomic_dec(&rqw->inflight);
271 	wake_up(&rqw->wait);
272 }
273 
274 static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
275 {
276 	struct iolatency_grp *iolat = private_data;
277 	return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
278 }
279 
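/*
 * Take an inflight slot for this group, sleeping on rq_wait until we're under
 * max_depth.  Root-issued IO and dying tasks take a slot without waiting.
 */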
280 static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
281 				       struct iolatency_grp *iolat,
282 				       bool issue_as_root,
283 				       bool use_memdelay)
284 {
285 	struct rq_wait *rqw = &iolat->rq_wait;
286 	unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
287 
288 	if (use_delay)
289 		blkcg_schedule_throttle(rqos->q, use_memdelay);
290 
291 	/*
292 	 * To avoid priority inversions we want to just take a slot if we are
293 	 * issuing as root.  If we're being killed off there's no point in
294 	 * delaying things, we may have been killed by OOM so throttling may
295 	 * make recovery take even longer, so just let the IO's through so the
296 	 * task can go away.
297 	 */
298 	if (issue_as_root || fatal_signal_pending(current)) {
299 		atomic_inc(&rqw->inflight);
300 		return;
301 	}
302 
303 	rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
304 }
305 
306 #define SCALE_DOWN_FACTOR 2
307 #define SCALE_UP_FACTOR 4
308 
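/* Scale up by 1/16th of the queue depth, scale down by 1/4th, minimum of 1. */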
309 static inline unsigned long scale_amount(unsigned long qd, bool up)
310 {
311 	return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
312 }
313 
314 /*
315  * We scale the qd down faster than we scale up, so we need to use this helper
316  * to adjust the scale_cookie accordingly so we don't prematurely get
317  * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
318  *
319  * Each group has its own local copy of the last scale cookie it saw, so if
320  * the global scale cookie goes up or down it knows which way it needs to go
321  * based on its last knowledge of it.
322  */
323 static void scale_cookie_change(struct blk_iolatency *blkiolat,
324 				struct child_latency_info *lat_info,
325 				bool up)
326 {
327 	unsigned long qd = blkiolat->rqos.q->nr_requests;
328 	unsigned long scale = scale_amount(qd, up);
329 	unsigned long old = atomic_read(&lat_info->scale_cookie);
330 	unsigned long max_scale = qd << 1;
331 	unsigned long diff = 0;
332 
333 	if (old < DEFAULT_SCALE_COOKIE)
334 		diff = DEFAULT_SCALE_COOKIE - old;
335 
336 	if (up) {
337 		if (scale + old > DEFAULT_SCALE_COOKIE)
338 			atomic_set(&lat_info->scale_cookie,
339 				   DEFAULT_SCALE_COOKIE);
340 		else if (diff > qd)
341 			atomic_inc(&lat_info->scale_cookie);
342 		else
343 			atomic_add(scale, &lat_info->scale_cookie);
344 	} else {
345 		/*
346 		 * We don't want to dig a hole so deep that it takes us hours to
347 		 * dig out of it.  Just enough that we don't throttle/unthrottle
348 		 * with jagged workloads but can still unthrottle once pressure
349 		 * has sufficiently dissipated.
350 		 */
351 		if (diff > qd) {
352 			if (diff < max_scale)
353 				atomic_dec(&lat_info->scale_cookie);
354 		} else {
355 			atomic_sub(scale, &lat_info->scale_cookie);
356 		}
357 	}
358 }
359 
360 /*
361  * Change the queue depth of the iolatency_grp.  We add 1/16th of the queue
362  * depth at a time when scaling up, and halve it when scaling down, so we don't
363  * get wild swings and hopefully dial in to a fair share of the queue depth.
364  */
365 static void scale_change(struct iolatency_grp *iolat, bool up)
366 {
367 	unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
368 	unsigned long scale = scale_amount(qd, up);
369 	unsigned long old = iolat->rq_depth.max_depth;
370 
371 	if (old > qd)
372 		old = qd;
373 
374 	if (up) {
375 		if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
376 			return;
377 
378 		if (old < qd) {
379 			old += scale;
380 			old = min(old, qd);
381 			iolat->rq_depth.max_depth = old;
382 			wake_up_all(&iolat->rq_wait.wait);
383 		}
384 	} else {
385 		old >>= 1;
386 		iolat->rq_depth.max_depth = max(old, 1UL);
387 	}
388 }
389 
390 /* Check our parent and see if the scale cookie has changed. */
391 static void check_scale_change(struct iolatency_grp *iolat)
392 {
393 	struct iolatency_grp *parent;
394 	struct child_latency_info *lat_info;
395 	unsigned int cur_cookie;
396 	unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
397 	u64 scale_lat;
398 	unsigned int old;
399 	int direction = 0;
400 
401 	if (lat_to_blkg(iolat)->parent == NULL)
402 		return;
403 
404 	parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
405 	if (!parent)
406 		return;
407 
408 	lat_info = &parent->child_lat;
409 	cur_cookie = atomic_read(&lat_info->scale_cookie);
410 	scale_lat = READ_ONCE(lat_info->scale_lat);
411 
412 	if (cur_cookie < our_cookie)
413 		direction = -1;
414 	else if (cur_cookie > our_cookie)
415 		direction = 1;
416 	else
417 		return;
418 
419 	old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);
420 
421 	/* Somebody beat us to the punch, just bail. */
422 	if (old != our_cookie)
423 		return;
424 
425 	if (direction < 0 && iolat->min_lat_nsec) {
426 		u64 samples_thresh;
427 
428 		if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
429 			return;
430 
431 		/*
432 		 * Sometimes high priority groups are their own worst enemy, so
433 		 * instead of taking it out on some poor other group that did 5%
434 		 * or less of the IO's for the last summation, just skip this
435 		 * scale down event.
436 		 */
437 		samples_thresh = lat_info->nr_samples * 5;
438 		samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
439 		if (iolat->nr_samples <= samples_thresh)
440 			return;
441 	}
442 
443 	/* We're as low as we can go. */
444 	if (iolat->rq_depth.max_depth == 1 && direction < 0) {
445 		blkcg_use_delay(lat_to_blkg(iolat));
446 		return;
447 	}
448 
449 	/* We're back to the default cookie, unthrottle all the things. */
450 	if (cur_cookie == DEFAULT_SCALE_COOKIE) {
451 		blkcg_clear_delay(lat_to_blkg(iolat));
452 		iolat->rq_depth.max_depth = UINT_MAX;
453 		wake_up_all(&iolat->rq_wait.wait);
454 		return;
455 	}
456 
457 	scale_change(iolat, direction > 0);
458 }
459 
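/*
 * rq_qos throttle hook.  Walk from the bio's blkg up towards the root,
 * re-checking the scale cookie and taking an inflight slot at every level
 * that has iolatency configured, then make sure the housekeeping timer is
 * armed (it fires roughly once a second).
 */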
460 static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
461 {
462 	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
463 	struct blkcg_gq *blkg = bio->bi_blkg;
464 	bool issue_as_root = bio_issue_as_root_blkg(bio);
465 
466 	if (!blk_iolatency_enabled(blkiolat))
467 		return;
468 
469 	while (blkg && blkg->parent) {
470 		struct iolatency_grp *iolat = blkg_to_lat(blkg);
471 		if (!iolat) {
472 			blkg = blkg->parent;
473 			continue;
474 		}
475 
476 		check_scale_change(iolat);
477 		__blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
478 				     (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
479 		blkg = blkg->parent;
480 	}
481 	if (!timer_pending(&blkiolat->timer))
482 		mod_timer(&blkiolat->timer, jiffies + HZ);
483 }
484 
485 static void iolatency_record_time(struct iolatency_grp *iolat,
486 				  struct bio_issue *issue, u64 now,
487 				  bool issue_as_root)
488 {
489 	u64 start = bio_issue_time(issue);
490 	u64 req_time;
491 
492 	/*
493 	 * Truncate "now" the same way the bio issue time was truncated so the
494 	 * two timestamps are directly comparable.
495 	 */
496 	now = __bio_issue_time(now);
497 
498 	if (now <= start)
499 		return;
500 
501 	req_time = now - start;
502 
503 	/*
504 	 * We don't want to count issue_as_root bio's in the cgroup's latency
505 	 * statistics as it could skew the numbers downwards.
506 	 */
507 	if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
508 		u64 sub = iolat->min_lat_nsec;
509 		if (req_time < sub)
510 			blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
511 		return;
512 	}
513 
514 	latency_stat_record_time(iolat, req_time);
515 }
516 
517 #define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
518 #define BLKIOLATENCY_MIN_GOOD_SAMPLES 5
519 
520 static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
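/*
 * Called when this group's stat window has elapsed.  Sum up the percpu stats,
 * fold them into the parent's child_lat bookkeeping, and decide whether the
 * parent's children should be scaled up or down.
 */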
521 {
522 	struct blkcg_gq *blkg = lat_to_blkg(iolat);
523 	struct iolatency_grp *parent;
524 	struct child_latency_info *lat_info;
525 	struct latency_stat stat;
526 	unsigned long flags;
527 	int cpu;
528 
529 	latency_stat_init(iolat, &stat);
530 	preempt_disable();
531 	for_each_online_cpu(cpu) {
532 		struct latency_stat *s;
533 		s = per_cpu_ptr(iolat->stats, cpu);
534 		latency_stat_sum(iolat, &stat, s);
535 		latency_stat_init(iolat, s);
536 	}
537 	preempt_enable();
538 
539 	parent = blkg_to_lat(blkg->parent);
540 	if (!parent)
541 		return;
542 
543 	lat_info = &parent->child_lat;
544 
545 	iolat_update_total_lat_avg(iolat, &stat);
546 
547 	/* Everything is ok and we don't need to adjust the scale. */
548 	if (latency_sum_ok(iolat, &stat) &&
549 	    atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
550 		return;
551 
552 	/* Serialize with our peers; if somebody scaled recently we bail below. */
553 	spin_lock_irqsave(&lat_info->lock, flags);
554 
555 	latency_stat_sum(iolat, &iolat->cur_stat, &stat);
556 	lat_info->nr_samples -= iolat->nr_samples;
557 	lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
558 	iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);
559 
560 	if ((lat_info->last_scale_event >= now ||
561 	    now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
562 		goto out;
563 
564 	if (latency_sum_ok(iolat, &iolat->cur_stat) &&
565 	    latency_sum_ok(iolat, &stat)) {
566 		if (latency_stat_samples(iolat, &iolat->cur_stat) <
567 		    BLKIOLATENCY_MIN_GOOD_SAMPLES)
568 			goto out;
569 		if (lat_info->scale_grp == iolat) {
570 			lat_info->last_scale_event = now;
571 			scale_cookie_change(iolat->blkiolat, lat_info, true);
572 		}
573 	} else if (lat_info->scale_lat == 0 ||
574 		   lat_info->scale_lat >= iolat->min_lat_nsec) {
575 		lat_info->last_scale_event = now;
576 		if (!lat_info->scale_grp ||
577 		    lat_info->scale_lat > iolat->min_lat_nsec) {
578 			WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
579 			lat_info->scale_grp = iolat;
580 		}
581 		scale_cookie_change(iolat->blkiolat, lat_info, false);
582 	}
583 	latency_stat_init(iolat, &iolat->cur_stat);
584 out:
585 	spin_unlock_irqrestore(&lat_info->lock, flags);
586 }
587 
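/*
 * rq_qos done_bio hook.  Drop the inflight counts taken at throttle time,
 * record the completion latency at every level that has a target, and kick
 * off a latency check when this group's window has elapsed.
 */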
588 static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
589 {
590 	struct blkcg_gq *blkg;
591 	struct rq_wait *rqw;
592 	struct iolatency_grp *iolat;
593 	u64 window_start;
594 	u64 now = ktime_to_ns(ktime_get());
595 	bool issue_as_root = bio_issue_as_root_blkg(bio);
596 	bool enabled = false;
597 	int inflight = 0;
598 
599 	blkg = bio->bi_blkg;
600 	if (!blkg || !bio_flagged(bio, BIO_TRACKED))
601 		return;
602 
603 	iolat = blkg_to_lat(bio->bi_blkg);
604 	if (!iolat)
605 		return;
606 
607 	enabled = blk_iolatency_enabled(iolat->blkiolat);
608 	if (!enabled)
609 		return;
610 
611 	while (blkg && blkg->parent) {
612 		iolat = blkg_to_lat(blkg);
613 		if (!iolat) {
614 			blkg = blkg->parent;
615 			continue;
616 		}
617 		rqw = &iolat->rq_wait;
618 
619 		inflight = atomic_dec_return(&rqw->inflight);
620 		WARN_ON_ONCE(inflight < 0);
621 		if (iolat->min_lat_nsec == 0)
622 			goto next;
623 		iolatency_record_time(iolat, &bio->bi_issue, now,
624 				      issue_as_root);
625 		window_start = atomic64_read(&iolat->window_start);
626 		if (now > window_start &&
627 		    (now - window_start) >= iolat->cur_win_nsec) {
628 			if (atomic64_cmpxchg(&iolat->window_start,
629 					window_start, now) == window_start)
630 				iolatency_check_latencies(iolat, now);
631 		}
632 next:
633 		wake_up(&rqw->wait);
634 		blkg = blkg->parent;
635 	}
636 }
637 
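/*
 * rq_qos cleanup hook.  Give back the inflight slots taken at throttle time
 * without recording any latency (used on paths where the bio won't go
 * through done_bio).
 */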
638 static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
639 {
640 	struct blkcg_gq *blkg;
641 
642 	blkg = bio->bi_blkg;
643 	while (blkg && blkg->parent) {
644 		struct rq_wait *rqw;
645 		struct iolatency_grp *iolat;
646 
647 		iolat = blkg_to_lat(blkg);
648 		if (!iolat)
649 			goto next;
650 
651 		rqw = &iolat->rq_wait;
652 		atomic_dec(&rqw->inflight);
653 		wake_up(&rqw->wait);
654 next:
655 		blkg = blkg->parent;
656 	}
657 }
658 
659 static void blkcg_iolatency_exit(struct rq_qos *rqos)
660 {
661 	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
662 
663 	del_timer_sync(&blkiolat->timer);
664 	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
665 	kfree(blkiolat);
666 }
667 
668 static struct rq_qos_ops blkcg_iolatency_ops = {
669 	.throttle = blkcg_iolatency_throttle,
670 	.cleanup = blkcg_iolatency_cleanup,
671 	.done_bio = blkcg_iolatency_done_bio,
672 	.exit = blkcg_iolatency_exit,
673 };
674 
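/*
 * Periodic housekeeping.  For every parent whose children are currently
 * scaled down: scale the children back up if there is no scale_grp recorded,
 * and clear a stale scale_grp that hasn't caused a scale event for five
 * seconds, so groups don't stay throttled after the pressure has gone away.
 */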
675 static void blkiolatency_timer_fn(struct timer_list *t)
676 {
677 	struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
678 	struct blkcg_gq *blkg;
679 	struct cgroup_subsys_state *pos_css;
680 	u64 now = ktime_to_ns(ktime_get());
681 
682 	rcu_read_lock();
683 	blkg_for_each_descendant_pre(blkg, pos_css,
684 				     blkiolat->rqos.q->root_blkg) {
685 		struct iolatency_grp *iolat;
686 		struct child_latency_info *lat_info;
687 		unsigned long flags;
688 		u64 cookie;
689 
690 		/*
691 		 * We could be exiting, don't access the pd unless we have a
692 		 * ref on the blkg.
693 		 */
694 		if (!blkg_tryget(blkg))
695 			continue;
696 
697 		iolat = blkg_to_lat(blkg);
698 		if (!iolat)
699 			goto next;
700 
701 		lat_info = &iolat->child_lat;
702 		cookie = atomic_read(&lat_info->scale_cookie);
703 
704 		if (cookie >= DEFAULT_SCALE_COOKIE)
705 			goto next;
706 
707 		spin_lock_irqsave(&lat_info->lock, flags);
708 		if (lat_info->last_scale_event >= now)
709 			goto next_lock;
710 
711 		/*
712 		 * We scaled down but don't have a scale_grp, scale up and carry
713 		 * on.
714 		 */
715 		if (lat_info->scale_grp == NULL) {
716 			scale_cookie_change(iolat->blkiolat, lat_info, true);
717 			goto next_lock;
718 		}
719 
720 		/*
721 		 * It's been 5 seconds since our last scale event, clear the
722 		 * scale grp in case the group that needed the scale down isn't
723 		 * doing any IO currently.
724 		 */
725 		if (now - lat_info->last_scale_event >=
726 		    ((u64)NSEC_PER_SEC * 5))
727 			lat_info->scale_grp = NULL;
728 next_lock:
729 		spin_unlock_irqrestore(&lat_info->lock, flags);
730 next:
731 		blkg_put(blkg);
732 	}
733 	rcu_read_unlock();
734 }
735 
736 int blk_iolatency_init(struct request_queue *q)
737 {
738 	struct blk_iolatency *blkiolat;
739 	struct rq_qos *rqos;
740 	int ret;
741 
742 	blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
743 	if (!blkiolat)
744 		return -ENOMEM;
745 
746 	rqos = &blkiolat->rqos;
747 	rqos->id = RQ_QOS_CGROUP;
748 	rqos->ops = &blkcg_iolatency_ops;
749 	rqos->q = q;
750 
751 	rq_qos_add(q, rqos);
752 
753 	ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
754 	if (ret) {
755 		rq_qos_del(q, rqos);
756 		kfree(blkiolat);
757 		return ret;
758 	}
759 
760 	timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
761 
762 	return 0;
763 }
764 
765 /*
766  * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
767  * return 0.
768  */
769 static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
770 {
771 	struct iolatency_grp *iolat = blkg_to_lat(blkg);
772 	u64 oldval = iolat->min_lat_nsec;
773 
774 	iolat->min_lat_nsec = val;
775 	iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
776 	iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
777 				    BLKIOLATENCY_MAX_WIN_SIZE);
778 
779 	if (!oldval && val)
780 		return 1;
781 	if (oldval && !val)
782 		return -1;
783 	return 0;
784 }
785 
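/* Reset the parent's scaling state so a changed target starts from a clean slate. */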
786 static void iolatency_clear_scaling(struct blkcg_gq *blkg)
787 {
788 	if (blkg->parent) {
789 		struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
790 		struct child_latency_info *lat_info;
791 		if (!iolat)
792 			return;
793 
794 		lat_info = &iolat->child_lat;
795 		spin_lock(&lat_info->lock);
796 		atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
797 		lat_info->last_scale_event = 0;
798 		lat_info->scale_grp = NULL;
799 		lat_info->scale_lat = 0;
800 		spin_unlock(&lat_info->lock);
801 	}
802 }
803 
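/*
 * Handle writes to io.latency: "MAJ:MIN target=<usecs>" (or "target=max" to
 * clear).  Updates the group's target and bumps or drops the global enable
 * count when a target is first set or removed.
 */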
804 static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
805 			     size_t nbytes, loff_t off)
806 {
807 	struct blkcg *blkcg = css_to_blkcg(of_css(of));
808 	struct blkcg_gq *blkg;
809 	struct blkg_conf_ctx ctx;
810 	struct iolatency_grp *iolat;
811 	char *p, *tok;
812 	u64 lat_val = 0;
813 	u64 oldval;
814 	int ret;
815 	int enable = 0;
816 
817 	ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
818 	if (ret)
819 		return ret;
820 
821 	iolat = blkg_to_lat(ctx.blkg);
822 	p = ctx.body;
823 
824 	ret = -EINVAL;
825 	while ((tok = strsep(&p, " "))) {
826 		char key[16];
827 		char val[21];	/* 18446744073709551616 */
828 
829 		if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
830 			goto out;
831 
832 		if (!strcmp(key, "target")) {
833 			u64 v;
834 
835 			if (!strcmp(val, "max"))
836 				lat_val = 0;
837 			else if (sscanf(val, "%llu", &v) == 1)
838 				lat_val = v * NSEC_PER_USEC;
839 			else
840 				goto out;
841 		} else {
842 			goto out;
843 		}
844 	}
845 
846 	/* Apply the new target and remember the old one so we know if it changed. */
847 	blkg = ctx.blkg;
848 	oldval = iolat->min_lat_nsec;
849 
850 	enable = iolatency_set_min_lat_nsec(blkg, lat_val);
851 	if (enable) {
852 		WARN_ON_ONCE(!blk_get_queue(blkg->q));
853 		blkg_get(blkg);
854 	}
855 
856 	if (oldval != iolat->min_lat_nsec) {
857 		iolatency_clear_scaling(blkg);
858 	}
859 
860 	ret = 0;
861 out:
862 	blkg_conf_finish(&ctx);
863 	if (ret == 0 && enable) {
864 		struct iolatency_grp *tmp = blkg_to_lat(blkg);
865 		struct blk_iolatency *blkiolat = tmp->blkiolat;
866 
867 		blk_mq_freeze_queue(blkg->q);
868 
869 		if (enable == 1)
870 			atomic_inc(&blkiolat->enabled);
871 		else if (enable == -1)
872 			atomic_dec(&blkiolat->enabled);
873 		else
874 			WARN_ON_ONCE(1);
875 
876 		blk_mq_unfreeze_queue(blkg->q);
877 
878 		blkg_put(blkg);
879 		blk_put_queue(blkg->q);
880 	}
881 	return ret ?: nbytes;
882 }
883 
884 static u64 iolatency_prfill_limit(struct seq_file *sf,
885 				  struct blkg_policy_data *pd, int off)
886 {
887 	struct iolatency_grp *iolat = pd_to_lat(pd);
888 	const char *dname = blkg_dev_name(pd->blkg);
889 
890 	if (!dname || !iolat->min_lat_nsec)
891 		return 0;
892 	seq_printf(sf, "%s target=%llu\n",
893 		   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
894 	return 0;
895 }
896 
897 static int iolatency_print_limit(struct seq_file *sf, void *v)
898 {
899 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
900 			  iolatency_prfill_limit,
901 			  &blkcg_policy_iolatency, seq_cft(sf)->private, false);
902 	return 0;
903 }
904 
905 static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
906 				 size_t size)
907 {
908 	struct latency_stat stat;
909 	int cpu;
910 
911 	latency_stat_init(iolat, &stat);
912 	preempt_disable();
913 	for_each_online_cpu(cpu) {
914 		struct latency_stat *s;
915 		s = per_cpu_ptr(iolat->stats, cpu);
916 		latency_stat_sum(iolat, &stat, s);
917 	}
918 	preempt_enable();
919 
920 	if (iolat->rq_depth.max_depth == UINT_MAX)
921 		return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
922 				 (unsigned long long)stat.ps.missed,
923 				 (unsigned long long)stat.ps.total);
924 	return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
925 			 (unsigned long long)stat.ps.missed,
926 			 (unsigned long long)stat.ps.total,
927 			 iolat->rq_depth.max_depth);
928 }
929 
930 static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
931 				size_t size)
932 {
933 	struct iolatency_grp *iolat = pd_to_lat(pd);
934 	unsigned long long avg_lat;
935 	unsigned long long cur_win;
936 
937 	if (iolat->ssd)
938 		return iolatency_ssd_stat(iolat, buf, size);
939 
940 	avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
941 	cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
942 	if (iolat->rq_depth.max_depth == UINT_MAX)
943 		return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
944 				 avg_lat, cur_win);
945 
946 	return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
947 			 iolat->rq_depth.max_depth, avg_lat, cur_win);
948 }
949 
950 
951 static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
952 {
953 	struct iolatency_grp *iolat;
954 
955 	iolat = kzalloc_node(sizeof(*iolat), gfp, node);
956 	if (!iolat)
957 		return NULL;
958 	iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
959 				       __alignof__(struct latency_stat), gfp);
960 	if (!iolat->stats) {
961 		kfree(iolat);
962 		return NULL;
963 	}
964 	return &iolat->pd;
965 }
966 
967 static void iolatency_pd_init(struct blkg_policy_data *pd)
968 {
969 	struct iolatency_grp *iolat = pd_to_lat(pd);
970 	struct blkcg_gq *blkg = lat_to_blkg(iolat);
971 	struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
972 	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
973 	u64 now = ktime_to_ns(ktime_get());
974 	int cpu;
975 
976 	if (blk_queue_nonrot(blkg->q))
977 		iolat->ssd = true;
978 	else
979 		iolat->ssd = false;
980 
981 	for_each_possible_cpu(cpu) {
982 		struct latency_stat *stat;
983 		stat = per_cpu_ptr(iolat->stats, cpu);
984 		latency_stat_init(iolat, stat);
985 	}
986 
987 	latency_stat_init(iolat, &iolat->cur_stat);
988 	rq_wait_init(&iolat->rq_wait);
989 	spin_lock_init(&iolat->child_lat.lock);
990 	iolat->rq_depth.queue_depth = blkg->q->nr_requests;
991 	iolat->rq_depth.max_depth = UINT_MAX;
992 	iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
993 	iolat->blkiolat = blkiolat;
994 	iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
995 	atomic64_set(&iolat->window_start, now);
996 
997 	/*
998 	 * We init things in list order, so the pd for the parent may not be
999 	 * init'ed yet for whatever reason.
1000 	 */
1001 	if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
1002 		struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
1003 		atomic_set(&iolat->scale_cookie,
1004 			   atomic_read(&parent->child_lat.scale_cookie));
1005 	} else {
1006 		atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
1007 	}
1008 
1009 	atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
1010 }
1011 
1012 static void iolatency_pd_offline(struct blkg_policy_data *pd)
1013 {
1014 	struct iolatency_grp *iolat = pd_to_lat(pd);
1015 	struct blkcg_gq *blkg = lat_to_blkg(iolat);
1016 	struct blk_iolatency *blkiolat = iolat->blkiolat;
1017 	int ret;
1018 
1019 	ret = iolatency_set_min_lat_nsec(blkg, 0);
1020 	if (ret == 1)
1021 		atomic_inc(&blkiolat->enabled);
1022 	if (ret == -1)
1023 		atomic_dec(&blkiolat->enabled);
1024 	iolatency_clear_scaling(blkg);
1025 }
1026 
1027 static void iolatency_pd_free(struct blkg_policy_data *pd)
1028 {
1029 	struct iolatency_grp *iolat = pd_to_lat(pd);
1030 	free_percpu(iolat->stats);
1031 	kfree(iolat);
1032 }
1033 
1034 static struct cftype iolatency_files[] = {
1035 	{
1036 		.name = "latency",
1037 		.flags = CFTYPE_NOT_ON_ROOT,
1038 		.seq_show = iolatency_print_limit,
1039 		.write = iolatency_set_limit,
1040 	},
1041 	{}
1042 };
1043 
1044 static struct blkcg_policy blkcg_policy_iolatency = {
1045 	.dfl_cftypes	= iolatency_files,
1046 	.pd_alloc_fn	= iolatency_pd_alloc,
1047 	.pd_init_fn	= iolatency_pd_init,
1048 	.pd_offline_fn	= iolatency_pd_offline,
1049 	.pd_free_fn	= iolatency_pd_free,
1050 	.pd_stat_fn	= iolatency_pd_stat,
1051 };
1052 
1053 static int __init iolatency_init(void)
1054 {
1055 	return blkcg_policy_register(&blkcg_policy_iolatency);
1056 }
1057 
1058 static void __exit iolatency_exit(void)
1059 {
1060 	return blkcg_policy_unregister(&blkcg_policy_iolatency);
1061 }
1062 
1063 module_init(iolatency_init);
1064 module_exit(iolatency_exit);
1065