/*
 * Block rq-qos base io controller
 *
 * This works similarly to wbt with a few exceptions
 *
 * - It's bio based, so the latency covers the whole block layer in addition to
 *   the actual io.
 * - We will throttle all IO that comes in here if we need to.
 * - We use the mean latency over the 100ms window.  This is because writes can
 *   be particularly fast, which could give us a false sense of the impact of
 *   other workloads on our protected workload.
 * - By default there's no throttling, we set the queue_depth to UINT_MAX so
 *   that we can have as many outstanding bios as we're allowed to.  Only at
 *   throttle time do we pay attention to the actual queue depth.
 *
 * The hierarchy works like the cpu controller does, we track the latency at
 * every configured node, and each configured node has its own independent
 * queue depth.  This means that we only care about our latency targets at the
 * peer level.  Some group at the bottom of the hierarchy isn't going to affect
 * a group at the end of some other path if we're only configured at the leaf
 * level.
 *
 * Consider the following
 *
 *                   root blkg
 *             /                     \
 *        fast (target=5ms)     slow (target=10ms)
 *         /     \                  /        \
 *       a        b          normal(15ms)   unloved
 *
 * "a" and "b" have no target, but their combined io under "fast" cannot exceed
 * an average latency of 5ms.  If it does then we will throttle the "slow"
 * group.  In the case of "normal", if it exceeds its 15ms target, we will
 * throttle "unloved", but nobody else.
 *
 * In this example "fast", "slow", and "normal" will be the only groups actually
 * accounting their io latencies.  We have to walk up the hierarchy to the root
 * on every submit and complete so we can do the appropriate stat recording and
 * adjust the queue depth of ourselves if needed.
 *
 * There are 2 ways we throttle IO.
 *
 * 1) Queue depth throttling.  As we throttle down we will adjust the maximum
 * number of IOs we're allowed to have in flight.  This starts at (u64)-1 down
 * to 1.  If the group is only ever submitting IO for itself then this is the
 * only way we throttle.
 *
 * 2) Induced delay throttling.  This is for the case that a group is generating
 * IO that has to be issued by the root cg to avoid priority inversion. So think
 * REQ_META or REQ_SWAP.  If we are already at qd == 1 and we're getting a lot
 * of work done for us on behalf of the root cg and are being asked to scale
 * down further, then we induce a latency at userspace return.  We accumulate
 * the total amount of time we need to be punished by doing
 *
 * total_time += min_lat_nsec - actual_io_completion
 *
 * and then at throttle time will do
 *
 * throttle_time = min(total_time, NSEC_PER_SEC)
 *
 * This induced delay will throttle back the activity that is generating the
 * root cg issued IOs, whether that's some metadata-intensive operation or the
 * group is using so much memory that it is pushing us into swap.
 *
 * Copyright (C) 2018 Josef Bacik
 */
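/*
 * Worked example of the induced-delay accounting above (illustrative
 * numbers only, not taken from the source): with min_lat_nsec = 5ms and
 * qd already at 1, three root-issued IOs completing in 1ms, 2ms and 4ms
 * accumulate
 *
 *   total_time += (5 - 1) + (5 - 2) + (5 - 4) = 8ms
 *
 * so the offending task sleeps min(8ms, NSEC_PER_SEC) = 8ms the next
 * time it returns to userspace.
 */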
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/memcontrol.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"

#define DEFAULT_SCALE_COOKIE 1000000U

static struct blkcg_policy blkcg_policy_iolatency;
struct iolatency_grp;

struct blk_iolatency {
	struct rq_qos rqos;
	struct timer_list timer;
	atomic_t enabled;
};

static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
{
	return container_of(rqos, struct blk_iolatency, rqos);
}

static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
{
	return atomic_read(&blkiolat->enabled) > 0;
}

struct child_latency_info {
	spinlock_t lock;

	/* Last time we adjusted the scale of everybody. */
	u64 last_scale_event;

	/* The latency that we missed. */
	u64 scale_lat;

	/* Total IOs from all of our children for the last summation. */
	u64 nr_samples;

	/* The guy who actually changed the latency numbers. */
	struct iolatency_grp *scale_grp;

	/* Cookie to tell if we need to scale up or down. */
	atomic_t scale_cookie;
};

struct percentile_stats {
	u64 total;
	u64 missed;
};

struct latency_stat {
	union {
		struct percentile_stats ps;
		struct blk_rq_stat rqs;
	};
};

struct iolatency_grp {
	struct blkg_policy_data pd;
	struct latency_stat __percpu *stats;
	struct latency_stat cur_stat;
	struct blk_iolatency *blkiolat;
	struct rq_depth rq_depth;
	struct rq_wait rq_wait;
	atomic64_t window_start;
	atomic_t scale_cookie;
	u64 min_lat_nsec;
	u64 cur_win_nsec;

	/* total running average of our io latency. */
	u64 lat_avg;

	/* Our current number of IOs for the last summation. */
	u64 nr_samples;

	bool ssd;
	struct child_latency_info child_lat;
};
#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
/*
 * These are the constants used to fake the fixed-point moving average
 * calculation just like load average.  The call to calc_load() folds
 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg.  The sampling
 * window size is bucketed to try to approximately calculate average
 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
 * elapse immediately.  Note, windows only elapse with IO activity.  Idle
 * periods extend the most recent window.
 */
#define BLKIOLATENCY_NR_EXP_FACTORS 5
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
				      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
	2045, // exp(1/600) - 600 samples
	2039, // exp(1/240) - 240 samples
	2031, // exp(1/120) - 120 samples
	2023, // exp(1/80)  - 80 samples
	2014, // exp(1/60)  - 60 samples
};
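/*
 * For example (values chosen for illustration): with the default 100ms
 * window, cur_win_nsec / BLKIOLATENCY_EXP_BUCKET_SIZE = 100ms / 250ms = 0,
 * so bucket 0 (factor 2045, ~600 windows) is used and the average decays
 * over roughly 600 * 100ms = 1 minute of busy time.  A group at the
 * maximum 1s window lands in bucket 4 (factor 2014, ~60 windows), again
 * about a minute of decay when windows elapse back to back.
 */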

static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}

static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
{
	return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
}

static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
	return pd_to_blkg(&iolat->pd);
}

static inline void latency_stat_init(struct iolatency_grp *iolat,
				     struct latency_stat *stat)
{
	if (iolat->ssd) {
		stat->ps.total = 0;
		stat->ps.missed = 0;
	} else
		blk_rq_stat_init(&stat->rqs);
}

static inline void latency_stat_sum(struct iolatency_grp *iolat,
				    struct latency_stat *sum,
				    struct latency_stat *stat)
{
	if (iolat->ssd) {
		sum->ps.total += stat->ps.total;
		sum->ps.missed += stat->ps.missed;
	} else
		blk_rq_stat_sum(&sum->rqs, &stat->rqs);
}

static inline void latency_stat_record_time(struct iolatency_grp *iolat,
					    u64 req_time)
{
	struct latency_stat *stat = get_cpu_ptr(iolat->stats);
	if (iolat->ssd) {
		if (req_time >= iolat->min_lat_nsec)
			stat->ps.missed++;
		stat->ps.total++;
	} else
		blk_rq_stat_add(&stat->rqs, req_time);
	put_cpu_ptr(stat);
}

static inline bool latency_sum_ok(struct iolatency_grp *iolat,
				  struct latency_stat *stat)
{
	if (iolat->ssd) {
		u64 thresh = div64_u64(stat->ps.total, 10);
		thresh = max(thresh, 1ULL);
		return stat->ps.missed < thresh;
	}
	return stat->rqs.mean <= iolat->min_lat_nsec;
}
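/*
 * Example of the SSD rule above (illustrative numbers): if a window saw
 * ps.total = 100 IOs, thresh = max(100 / 10, 1) = 10, so the sum is
 * "ok" as long as fewer than 10 of them (i.e. under ~10%) missed the
 * latency target.  On rotational storage the mean is compared against
 * min_lat_nsec directly instead.
 */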

static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
				       struct latency_stat *stat)
{
	if (iolat->ssd)
		return stat->ps.total;
	return stat->rqs.nr_samples;
}

static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
					      struct latency_stat *stat)
{
	int exp_idx;

	if (iolat->ssd)
		return;

	/*
	 * calc_load() takes in a number stored in fixed point representation.
	 * Because we are using this for IO time in ns, the values stored
	 * are significantly larger than the FIXED_1 denominator (2048).
	 * Therefore, rounding errors in the calculation are negligible and
	 * can be ignored.
	 */
	exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
			div64_u64(iolat->cur_win_nsec,
				  BLKIOLATENCY_EXP_BUCKET_SIZE));
	iolat->lat_avg = calc_load(iolat->lat_avg,
				   iolatency_exp_factors[exp_idx],
				   stat->rqs.mean);
}
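/*
 * Expanding the calc_load() call above (FIXED_1 == 2048; see
 * include/linux/sched/loadavg.h), each window update is roughly
 *
 *   lat_avg = (lat_avg * exp_factor +
 *              rqs.mean * (2048 - exp_factor)) / 2048
 *
 * so with exp_factor == 2045 a single window's mean only moves the
 * running average by about 3/2048ths (~0.15%) of the difference.
 */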

static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
				       wait_queue_entry_t *wait,
				       bool first_block)
{
	struct rq_wait *rqw = &iolat->rq_wait;

	if (first_block && waitqueue_active(&rqw->wait) &&
	    rqw->wait.head.next != &wait->entry)
		return false;
	return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
}

static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
				       struct iolatency_grp *iolat,
				       spinlock_t *lock, bool issue_as_root,
				       bool use_memdelay)
	__releases(lock)
	__acquires(lock)
{
	struct rq_wait *rqw = &iolat->rq_wait;
	unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
	DEFINE_WAIT(wait);
	bool first_block = true;

	if (use_delay)
		blkcg_schedule_throttle(rqos->q, use_memdelay);

	/*
	 * To avoid priority inversions we want to just take a slot if we are
	 * issuing as root.  If we're being killed off there's no point in
	 * delaying things, we may have been killed by OOM so throttling may
	 * make recovery take even longer, so just let the IOs through so the
	 * task can go away.
	 */
	if (issue_as_root || fatal_signal_pending(current)) {
		atomic_inc(&rqw->inflight);
		return;
	}

	if (iolatency_may_queue(iolat, &wait, first_block))
		return;

	do {
		prepare_to_wait_exclusive(&rqw->wait, &wait,
					  TASK_UNINTERRUPTIBLE);

		if (iolatency_may_queue(iolat, &wait, first_block))
			break;
		first_block = false;

		if (lock) {
			spin_unlock_irq(lock);
			io_schedule();
			spin_lock_irq(lock);
		} else {
			io_schedule();
		}
	} while (1);

	finish_wait(&rqw->wait, &wait);
}

#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4

static inline unsigned long scale_amount(unsigned long qd, bool up)
{
	return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
}
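/*
 * Example (assuming a queue with nr_requests == 128): scaling up moves
 * by max(128 >> 4, 1) == 8 slots per event, while scaling down moves by
 * max(128 >> 2, 1) == 32, so pressure backs the scale off four times as
 * fast as it is restored.
 */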

/*
 * We scale the qd down faster than we scale up, so we need to use this helper
 * to adjust the scale_cookie accordingly so we don't prematurely get the
 * scale_cookie back up to DEFAULT_SCALE_COOKIE and unthrottle too much.
 *
 * Each group has its own local copy of the last scale cookie it saw, so if
 * the global scale cookie goes up or down it knows which way it needs to go
 * based on its last knowledge of it.
 */
static void scale_cookie_change(struct blk_iolatency *blkiolat,
				struct child_latency_info *lat_info,
				bool up)
{
	unsigned long qd = blkiolat->rqos.q->nr_requests;
	unsigned long scale = scale_amount(qd, up);
	unsigned long old = atomic_read(&lat_info->scale_cookie);
	unsigned long max_scale = qd << 1;
	unsigned long diff = 0;

	if (old < DEFAULT_SCALE_COOKIE)
		diff = DEFAULT_SCALE_COOKIE - old;

	if (up) {
		if (scale + old > DEFAULT_SCALE_COOKIE)
			atomic_set(&lat_info->scale_cookie,
				   DEFAULT_SCALE_COOKIE);
		else if (diff > qd)
			atomic_inc(&lat_info->scale_cookie);
		else
			atomic_add(scale, &lat_info->scale_cookie);
	} else {
		/*
		 * We don't want to dig a hole so deep that it takes us hours to
		 * dig out of it.  Just enough that we don't throttle/unthrottle
		 * with jagged workloads but can still unthrottle once pressure
		 * has sufficiently dissipated.
		 */
		if (diff > qd) {
			if (diff < max_scale)
				atomic_dec(&lat_info->scale_cookie);
		} else {
			atomic_sub(scale, &lat_info->scale_cookie);
		}
	}
}
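/*
 * To illustrate with nr_requests == 128 (so scale == 32 down, 8 up,
 * max_scale == 256): repeated scale-down events subtract 32 from
 * DEFAULT_SCALE_COOKIE until the deficit exceeds qd (128), then creep
 * down by 1 at a time, and stop entirely once the deficit reaches
 * max_scale, so the cookie never drops below about
 * DEFAULT_SCALE_COOKIE - 256.  Scale-up events mirror this: +1 steps
 * while deep in the hole, +8 once within qd of the default, clamped at
 * DEFAULT_SCALE_COOKIE.  (Numbers are illustrative.)
 */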

/*
 * Change the queue depth of the iolatency_grp.  We add 1/16th of the
 * queue depth at a time while scaling up, and halve the current depth
 * when scaling down, so we don't get wild swings and hopefully dial in
 * to fairer distribution of the overall queue depth.
 */
static void scale_change(struct iolatency_grp *iolat, bool up)
{
	unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
	unsigned long scale = scale_amount(qd, up);
	unsigned long old = iolat->rq_depth.max_depth;

	if (old > qd)
		old = qd;

	if (up) {
		if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
			return;

		if (old < qd) {
			old += scale;
			old = min(old, qd);
			iolat->rq_depth.max_depth = old;
			wake_up_all(&iolat->rq_wait.wait);
		}
	} else {
		old >>= 1;
		iolat->rq_depth.max_depth = max(old, 1UL);
	}
}
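/*
 * With nr_requests == 128, a group under sustained pressure walks
 * 128 -> 64 -> 32 -> ... -> 1 in seven scale-down events, but needs
 * sixteen scale-up events (8 slots each) to climb all the way back,
 * matching the "punish quickly, forgive slowly" shape of
 * scale_cookie_change() above.  (Numbers are illustrative.)
 */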

/* Check our parent and see if the scale cookie has changed. */
static void check_scale_change(struct iolatency_grp *iolat)
{
	struct iolatency_grp *parent;
	struct child_latency_info *lat_info;
	unsigned int cur_cookie;
	unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
	u64 scale_lat;
	unsigned int old;
	int direction = 0;

	if (lat_to_blkg(iolat)->parent == NULL)
		return;

	parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
	if (!parent)
		return;

	lat_info = &parent->child_lat;
	cur_cookie = atomic_read(&lat_info->scale_cookie);
	scale_lat = READ_ONCE(lat_info->scale_lat);

	if (cur_cookie < our_cookie)
		direction = -1;
	else if (cur_cookie > our_cookie)
		direction = 1;
	else
		return;

	old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);

	/* Somebody beat us to the punch, just bail. */
	if (old != our_cookie)
		return;

	if (direction < 0 && iolat->min_lat_nsec) {
		u64 samples_thresh;

		if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
			return;

		/*
		 * Sometimes high priority groups are their own worst enemy, so
		 * instead of taking it out on some poor other group that did
		 * 5% or less of the IOs for the last summation, just skip this
		 * scale down event.
		 */
		samples_thresh = lat_info->nr_samples * 5;
		samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
		if (iolat->nr_samples <= samples_thresh)
			return;
	}

	/* We're as low as we can go. */
	if (iolat->rq_depth.max_depth == 1 && direction < 0) {
		blkcg_use_delay(lat_to_blkg(iolat));
		return;
	}

	/* We're back to the default cookie, unthrottle all the things. */
	if (cur_cookie == DEFAULT_SCALE_COOKIE) {
		blkcg_clear_delay(lat_to_blkg(iolat));
		iolat->rq_depth.max_depth = UINT_MAX;
		wake_up_all(&iolat->rq_wait.wait);
		return;
	}

	scale_change(iolat, direction > 0);
}

static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
				     spinlock_t *lock)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	struct request_queue *q = rqos->q;
	bool issue_as_root = bio_issue_as_root_blkg(bio);

	if (!blk_iolatency_enabled(blkiolat))
		return;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);
	bio_associate_blkcg(bio, &blkcg->css);
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		if (!lock)
			spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		if (!lock)
			spin_unlock_irq(q->queue_lock);
	}
	if (!blkg)
		goto out;

	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
	bio_associate_blkg(bio, blkg);
out:
	rcu_read_unlock();
	while (blkg && blkg->parent) {
		struct iolatency_grp *iolat = blkg_to_lat(blkg);
		if (!iolat) {
			blkg = blkg->parent;
			continue;
		}

		check_scale_change(iolat);
		__blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
				     (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
		blkg = blkg->parent;
	}
	if (!timer_pending(&blkiolat->timer))
		mod_timer(&blkiolat->timer, jiffies + HZ);
}

static void iolatency_record_time(struct iolatency_grp *iolat,
				  struct bio_issue *issue, u64 now,
				  bool issue_as_root)
{
	u64 start = bio_issue_time(issue);
	u64 req_time;

	/*
	 * Truncate 'now' the same way the issue time was truncated so the
	 * two timestamps are directly comparable.
	 */
	now = __bio_issue_time(now);

	if (now <= start)
		return;

	req_time = now - start;

	/*
	 * We don't want to count issue_as_root bios in the cgroup's latency
	 * statistics as it could skew the numbers downwards.
	 */
	if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
		u64 sub = iolat->min_lat_nsec;
		if (req_time < sub)
			blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
		return;
	}

	latency_stat_record_time(iolat, req_time);
}
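/*
 * The issue_as_root branch above is the "total_time += min_lat_nsec -
 * actual_io_completion" accounting from the header comment.  As an
 * illustration, a throttled group with min_lat_nsec = 5ms whose
 * root-issued bio completes in 2ms gets 3ms of delay charged via
 * blkcg_add_delay(), to be paid back at userspace return.
 */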

#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5

static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct iolatency_grp *parent;
	struct child_latency_info *lat_info;
	struct latency_stat stat;
	unsigned long flags;
	int cpu;

	latency_stat_init(iolat, &stat);
	preempt_disable();
	for_each_online_cpu(cpu) {
		struct latency_stat *s;
		s = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_sum(iolat, &stat, s);
		latency_stat_init(iolat, s);
	}
	preempt_enable();

	parent = blkg_to_lat(blkg->parent);
	if (!parent)
		return;

	lat_info = &parent->child_lat;

	iolat_update_total_lat_avg(iolat, &stat);

	/* Everything is ok and we don't need to adjust the scale. */
	if (latency_sum_ok(iolat, &stat) &&
	    atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
		return;

	/* Somebody beat us to the punch, just bail. */
	spin_lock_irqsave(&lat_info->lock, flags);

	latency_stat_sum(iolat, &iolat->cur_stat, &stat);
	lat_info->nr_samples -= iolat->nr_samples;
	lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
	iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);

	if ((lat_info->last_scale_event >= now ||
	    now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
		goto out;

	if (latency_sum_ok(iolat, &iolat->cur_stat) &&
	    latency_sum_ok(iolat, &stat)) {
		if (latency_stat_samples(iolat, &iolat->cur_stat) <
		    BLKIOLATENCY_MIN_GOOD_SAMPLES)
			goto out;
		if (lat_info->scale_grp == iolat) {
			lat_info->last_scale_event = now;
			scale_cookie_change(iolat->blkiolat, lat_info, true);
		}
	} else if (lat_info->scale_lat == 0 ||
		   lat_info->scale_lat >= iolat->min_lat_nsec) {
		lat_info->last_scale_event = now;
		if (!lat_info->scale_grp ||
		    lat_info->scale_lat > iolat->min_lat_nsec) {
			WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
			lat_info->scale_grp = iolat;
		}
		scale_cookie_change(iolat->blkiolat, lat_info, false);
	}
	latency_stat_init(iolat, &iolat->cur_stat);
out:
	spin_unlock_irqrestore(&lat_info->lock, flags);
}

static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	struct blkcg_gq *blkg;
	struct rq_wait *rqw;
	struct iolatency_grp *iolat;
	u64 window_start;
	u64 now = ktime_to_ns(ktime_get());
	bool issue_as_root = bio_issue_as_root_blkg(bio);
	bool enabled = false;

	blkg = bio->bi_blkg;
	if (!blkg)
		return;

	iolat = blkg_to_lat(bio->bi_blkg);
	if (!iolat)
		return;

	enabled = blk_iolatency_enabled(iolat->blkiolat);
	while (blkg && blkg->parent) {
		iolat = blkg_to_lat(blkg);
		if (!iolat) {
			blkg = blkg->parent;
			continue;
		}
		rqw = &iolat->rq_wait;

		atomic_dec(&rqw->inflight);
		if (!enabled || iolat->min_lat_nsec == 0)
			goto next;
		iolatency_record_time(iolat, &bio->bi_issue, now,
				      issue_as_root);
		window_start = atomic64_read(&iolat->window_start);
		if (now > window_start &&
		    (now - window_start) >= iolat->cur_win_nsec) {
			if (atomic64_cmpxchg(&iolat->window_start,
					window_start, now) == window_start)
				iolatency_check_latencies(iolat, now);
		}
next:
		wake_up(&rqw->wait);
		blkg = blkg->parent;
	}
}

static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	struct blkcg_gq *blkg;

	blkg = bio->bi_blkg;
	while (blkg && blkg->parent) {
		struct rq_wait *rqw;
		struct iolatency_grp *iolat;

		iolat = blkg_to_lat(blkg);
		if (!iolat)
			goto next;

		rqw = &iolat->rq_wait;
		atomic_dec(&rqw->inflight);
		wake_up(&rqw->wait);
next:
		blkg = blkg->parent;
	}
}

static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);

	del_timer_sync(&blkiolat->timer);
	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
	kfree(blkiolat);
}

static struct rq_qos_ops blkcg_iolatency_ops = {
	.throttle = blkcg_iolatency_throttle,
	.cleanup = blkcg_iolatency_cleanup,
	.done_bio = blkcg_iolatency_done_bio,
	.exit = blkcg_iolatency_exit,
};

static void blkiolatency_timer_fn(struct timer_list *t)
{
	struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	u64 now = ktime_to_ns(ktime_get());

	rcu_read_lock();
	blkg_for_each_descendant_pre(blkg, pos_css,
				     blkiolat->rqos.q->root_blkg) {
		struct iolatency_grp *iolat;
		struct child_latency_info *lat_info;
		unsigned long flags;
		u64 cookie;

		/*
		 * We could be exiting, don't access the pd unless we have a
		 * ref on the blkg.
		 */
		if (!blkg_try_get(blkg))
			continue;

		iolat = blkg_to_lat(blkg);
		if (!iolat)
			goto next;

		lat_info = &iolat->child_lat;
		cookie = atomic_read(&lat_info->scale_cookie);

		if (cookie >= DEFAULT_SCALE_COOKIE)
			goto next;

		spin_lock_irqsave(&lat_info->lock, flags);
		if (lat_info->last_scale_event >= now)
			goto next_lock;

		/*
		 * We scaled down but don't have a scale_grp, scale up and carry
		 * on.
		 */
		if (lat_info->scale_grp == NULL) {
			scale_cookie_change(iolat->blkiolat, lat_info, true);
			goto next_lock;
		}

		/*
		 * It's been 5 seconds since our last scale event, clear the
		 * scale grp in case the group that needed the scale down isn't
		 * doing any IO currently.
		 */
		if (now - lat_info->last_scale_event >=
		    ((u64)NSEC_PER_SEC * 5))
			lat_info->scale_grp = NULL;
next_lock:
		spin_unlock_irqrestore(&lat_info->lock, flags);
next:
		blkg_put(blkg);
	}
	rcu_read_unlock();
}

int blk_iolatency_init(struct request_queue *q)
{
	struct blk_iolatency *blkiolat;
	struct rq_qos *rqos;
	int ret;

	blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
	if (!blkiolat)
		return -ENOMEM;

	rqos = &blkiolat->rqos;
	rqos->id = RQ_QOS_CGROUP;
	rqos->ops = &blkcg_iolatency_ops;
	rqos->q = q;

	rq_qos_add(q, rqos);

	ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
	if (ret) {
		rq_qos_del(q, rqos);
		kfree(blkiolat);
		return ret;
	}

	timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);

	return 0;
}

static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
	struct iolatency_grp *iolat = blkg_to_lat(blkg);
	struct blk_iolatency *blkiolat = iolat->blkiolat;
	u64 oldval = iolat->min_lat_nsec;

	iolat->min_lat_nsec = val;
	iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
	iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
				    BLKIOLATENCY_MAX_WIN_SIZE);

	if (!oldval && val)
		atomic_inc(&blkiolat->enabled);
	if (oldval && !val)
		atomic_dec(&blkiolat->enabled);
}
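/*
 * Example of the window sizing above (illustrative values): a 2ms target
 * gives val << 4 == 32ms, which is clamped up to BLKIOLATENCY_MIN_WIN_SIZE
 * (100ms); a 10ms target gives a 160ms window; anything past ~62.5ms is
 * capped at BLKIOLATENCY_MAX_WIN_SIZE (1s).
 */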

static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
	if (blkg->parent) {
		struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
		struct child_latency_info *lat_info;
		if (!iolat)
			return;

		lat_info = &iolat->child_lat;
		spin_lock(&lat_info->lock);
		atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
		lat_info->last_scale_event = 0;
		lat_info->scale_grp = NULL;
		lat_info->scale_lat = 0;
		spin_unlock(&lat_info->lock);
	}
}

static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
			     size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkcg_gq *blkg;
	struct blkg_conf_ctx ctx;
	struct iolatency_grp *iolat;
	char *p, *tok;
	u64 lat_val = 0;
	u64 oldval;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
	if (ret)
		return ret;

	iolat = blkg_to_lat(ctx.blkg);
	p = ctx.body;

	ret = -EINVAL;
	while ((tok = strsep(&p, " "))) {
		char key[16];
		char val[21];	/* 18446744073709551615 */

		if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
			goto out;

		if (!strcmp(key, "target")) {
			u64 v;

			if (!strcmp(val, "max"))
				lat_val = 0;
			else if (sscanf(val, "%llu", &v) == 1)
				lat_val = v * NSEC_PER_USEC;
			else
				goto out;
		} else {
			goto out;
		}
	}

	/* Walk up the tree to see if our new val is lower than it should be. */
	blkg = ctx.blkg;
	oldval = iolat->min_lat_nsec;

	iolatency_set_min_lat_nsec(blkg, lat_val);
	if (oldval != iolat->min_lat_nsec) {
		iolatency_clear_scaling(blkg);
	}

	ret = 0;
out:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}
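/*
 * Usage sketch for the io.latency interface parsed above (per
 * Documentation/admin-guide/cgroup-v2.rst; the device numbers are
 * examples only):
 *
 *   # set a 2ms latency target for device 8:16 in this cgroup
 *   echo "8:16 target=2000" > io.latency
 *
 *   # remove the target again
 *   echo "8:16 target=max" > io.latency
 */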

static u64 iolatency_prfill_limit(struct seq_file *sf,
				  struct blkg_policy_data *pd, int off)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname || !iolat->min_lat_nsec)
		return 0;
	seq_printf(sf, "%s target=%llu\n",
		   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
	return 0;
}

static int iolatency_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  iolatency_prfill_limit,
			  &blkcg_policy_iolatency, seq_cft(sf)->private, false);
	return 0;
}

static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
				 size_t size)
{
	struct latency_stat stat;
	int cpu;

	latency_stat_init(iolat, &stat);
	preempt_disable();
	for_each_online_cpu(cpu) {
		struct latency_stat *s;
		s = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_sum(iolat, &stat, s);
	}
	preempt_enable();

	if (iolat->rq_depth.max_depth == UINT_MAX)
		return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
				 (unsigned long long)stat.ps.missed,
				 (unsigned long long)stat.ps.total);
	return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
			 (unsigned long long)stat.ps.missed,
			 (unsigned long long)stat.ps.total,
			 iolat->rq_depth.max_depth);
}

static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
				size_t size)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	unsigned long long avg_lat;
	unsigned long long cur_win;

	if (iolat->ssd)
		return iolatency_ssd_stat(iolat, buf, size);

	avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
	cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
	if (iolat->rq_depth.max_depth == UINT_MAX)
		return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
				 avg_lat, cur_win);

	return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
			 iolat->rq_depth.max_depth, avg_lat, cur_win);
}

static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
{
	struct iolatency_grp *iolat;

	iolat = kzalloc_node(sizeof(*iolat), gfp, node);
	if (!iolat)
		return NULL;
	iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
				       __alignof__(struct latency_stat), gfp);
	if (!iolat->stats) {
		kfree(iolat);
		return NULL;
	}
	return &iolat->pd;
}

static void iolatency_pd_init(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
	u64 now = ktime_to_ns(ktime_get());
	int cpu;

	if (blk_queue_nonrot(blkg->q))
		iolat->ssd = true;
	else
		iolat->ssd = false;

	for_each_possible_cpu(cpu) {
		struct latency_stat *stat;
		stat = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_init(iolat, stat);
	}

	latency_stat_init(iolat, &iolat->cur_stat);
	rq_wait_init(&iolat->rq_wait);
	spin_lock_init(&iolat->child_lat.lock);
	iolat->rq_depth.queue_depth = blkg->q->nr_requests;
	iolat->rq_depth.max_depth = UINT_MAX;
	iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
	iolat->blkiolat = blkiolat;
	iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
	atomic64_set(&iolat->window_start, now);

	/*
	 * We init things in list order, so the pd for the parent may not be
	 * init'ed yet for whatever reason.
	 */
	if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
		struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
		atomic_set(&iolat->scale_cookie,
			   atomic_read(&parent->child_lat.scale_cookie));
	} else {
		atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
	}

	atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
}

static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	struct blkcg_gq *blkg = lat_to_blkg(iolat);

	iolatency_set_min_lat_nsec(blkg, 0);
	iolatency_clear_scaling(blkg);
}

static void iolatency_pd_free(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	free_percpu(iolat->stats);
	kfree(iolat);
}

static struct cftype iolatency_files[] = {
	{
		.name = "latency",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = iolatency_print_limit,
		.write = iolatency_set_limit,
	},
	{}
};

static struct blkcg_policy blkcg_policy_iolatency = {
	.dfl_cftypes	= iolatency_files,
	.pd_alloc_fn	= iolatency_pd_alloc,
	.pd_init_fn	= iolatency_pd_init,
	.pd_offline_fn	= iolatency_pd_offline,
	.pd_free_fn	= iolatency_pd_free,
	.pd_stat_fn	= iolatency_pd_stat,
};

static int __init iolatency_init(void)
{
	return blkcg_policy_register(&blkcg_policy_iolatency);
}

static void __exit iolatency_exit(void)
{
	blkcg_policy_unregister(&blkcg_policy_iolatency);
}

module_init(iolatency_init);
module_exit(iolatency_exit);