xref: /openbmc/linux/block/blk-iolatency.c (revision 451bb7c3)
1d7067512SJosef Bacik /*
2d7067512SJosef Bacik  * Block rq-qos base io controller
3d7067512SJosef Bacik  *
4d7067512SJosef Bacik  * This works similarly to wbt, with a few exceptions:
5d7067512SJosef Bacik  *
6d7067512SJosef Bacik  * - It's bio based, so the latency covers the whole block layer in addition to
7d7067512SJosef Bacik  *   the actual io.
8d7067512SJosef Bacik  * - We will throttle all IO that comes in here if we need to.
9d7067512SJosef Bacik  * - We use the mean latency over the 100ms window.  This is because writes can
10d7067512SJosef Bacik  *   be particularly fast, which could give us a false sense of the impact of
11d7067512SJosef Bacik  *   other workloads on our protected workload.
12a284390bSJosef Bacik  * - By default there's no throttling, we set the queue_depth to UINT_MAX so
13a284390bSJosef Bacik  *   that we can have as many outstanding bios as we're allowed to.  Only at
14d7067512SJosef Bacik  *   throttle time do we pay attention to the actual queue depth.
15d7067512SJosef Bacik  *
16d7067512SJosef Bacik  * The hierarchy works like the cpu controller does: we track the latency at
17d7067512SJosef Bacik  * every configured node, and each configured node has its own independent
18d7067512SJosef Bacik  * queue depth.  This means that we only care about our latency targets at the
19d7067512SJosef Bacik  * peer level.  Some group at the bottom of the hierarchy isn't going to affect
20d7067512SJosef Bacik  * a group at the end of some other path if we're only configured at leaf level.
21d7067512SJosef Bacik  *
22d7067512SJosef Bacik  * Consider the following
23d7067512SJosef Bacik  *
24d7067512SJosef Bacik  *                   root blkg
25d7067512SJosef Bacik  *             /                     \
26d7067512SJosef Bacik  *        fast (target=5ms)     slow (target=10ms)
27d7067512SJosef Bacik  *         /     \                  /        \
28d7067512SJosef Bacik  *       a        b          normal(15ms)   unloved
29d7067512SJosef Bacik  *
30d7067512SJosef Bacik  * "a" and "b" have no target, but their combined io under "fast" cannot exceed
31d7067512SJosef Bacik  * an average latency of 5ms.  If it does then we will throttle the "slow"
32d7067512SJosef Bacik  * group.  In the case of "normal", if it exceeds its 15ms target, we will
33d7067512SJosef Bacik  * throttle "unloved", but nobody else.
34d7067512SJosef Bacik  *
35d7067512SJosef Bacik  * In this example "fast", "slow", and "normal" will be the only groups actually
36d7067512SJosef Bacik  * accounting their io latencies.  We have to walk up the hierarchy to the root
37d7067512SJosef Bacik  * on every submit and complete so we can do the appropriate stat recording and
38d7067512SJosef Bacik  * adjust our own queue depth if needed.
39d7067512SJosef Bacik  *
40d7067512SJosef Bacik  * There are 2 ways we throttle IO.
41d7067512SJosef Bacik  *
42d7067512SJosef Bacik  * 1) Queue depth throttling.  As we throttle down we will adjust the maximum
43d7067512SJosef Bacik  * number of IOs we're allowed to have in flight.  This starts at UINT_MAX down
44d7067512SJosef Bacik  * to 1.  If the group is only ever submitting IO for itself then this is the
45d7067512SJosef Bacik  * only way we throttle.
46d7067512SJosef Bacik  *
47d7067512SJosef Bacik  * 2) Induced delay throttling.  This is for the case that a group is generating
48d7067512SJosef Bacik  * IO that has to be issued by the root cg to avoid priority inversion. So think
49d7067512SJosef Bacik  * REQ_META or REQ_SWAP.  If we are already at qd == 1 and we're getting a lot
50d7067512SJosef Bacik  * of work done for us on behalf of the root cg and are being asked to scale
51d7067512SJosef Bacik  * down more, then we induce a latency at userspace return.  We accumulate the
52d7067512SJosef Bacik  * total amount of time we need to be punished by doing
53d7067512SJosef Bacik  *
54d7067512SJosef Bacik  * total_time += min_lat_nsec - actual_io_completion
55d7067512SJosef Bacik  *
56d7067512SJosef Bacik  * and then at throttle time will do
57d7067512SJosef Bacik  *
58d7067512SJosef Bacik  * throttle_time = min(total_time, NSEC_PER_SEC)
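 *
 * As an illustrative walk-through (the numbers here are made up, not from
 * the code): with min_lat_nsec = 10ms and a root-issued IO completing in
 * 2ms, 8ms is added to total_time, and by the formula above the offending
 * group is delayed by at most one second per return to userspace.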
59d7067512SJosef Bacik  *
60d7067512SJosef Bacik  * This induced delay will throttle back the activity that is generating the
61d7067512SJosef Bacik  * root cg issued IOs, whether that's some metadata intensive operation or the
62d7067512SJosef Bacik  * group is using so much memory that it is pushing us into swap.
63d7067512SJosef Bacik  *
64d7067512SJosef Bacik  * Copyright (C) 2018 Josef Bacik
65d7067512SJosef Bacik  */
66d7067512SJosef Bacik #include <linux/kernel.h>
67d7067512SJosef Bacik #include <linux/blk_types.h>
68d7067512SJosef Bacik #include <linux/backing-dev.h>
69d7067512SJosef Bacik #include <linux/module.h>
70d7067512SJosef Bacik #include <linux/timer.h>
71d7067512SJosef Bacik #include <linux/memcontrol.h>
72c480bcf9SDennis Zhou (Facebook) #include <linux/sched/loadavg.h>
73d7067512SJosef Bacik #include <linux/sched/signal.h>
74d7067512SJosef Bacik #include <trace/events/block.h>
75d7067512SJosef Bacik #include "blk-rq-qos.h"
76d7067512SJosef Bacik #include "blk-stat.h"
77d7067512SJosef Bacik 
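/*
 * Every group's cached scale_cookie starts at DEFAULT_SCALE_COOKIE.  A
 * parent's scale_cookie dropping below this tells its children to scale
 * down; returning to it means "unthrottle everything".
 */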
78d7067512SJosef Bacik #define DEFAULT_SCALE_COOKIE 1000000U
79d7067512SJosef Bacik 
80d7067512SJosef Bacik static struct blkcg_policy blkcg_policy_iolatency;
81d7067512SJosef Bacik struct iolatency_grp;
82d7067512SJosef Bacik 
83d7067512SJosef Bacik struct blk_iolatency {
84d7067512SJosef Bacik 	struct rq_qos rqos;
85d7067512SJosef Bacik 	struct timer_list timer;
86d7067512SJosef Bacik 	atomic_t enabled;
87d7067512SJosef Bacik };
88d7067512SJosef Bacik 
89d7067512SJosef Bacik static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
90d7067512SJosef Bacik {
91d7067512SJosef Bacik 	return container_of(rqos, struct blk_iolatency, rqos);
92d7067512SJosef Bacik }
93d7067512SJosef Bacik 
94d7067512SJosef Bacik static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
95d7067512SJosef Bacik {
96d7067512SJosef Bacik 	return atomic_read(&blkiolat->enabled) > 0;
97d7067512SJosef Bacik }
98d7067512SJosef Bacik 
99d7067512SJosef Bacik struct child_latency_info {
100d7067512SJosef Bacik 	spinlock_t lock;
101d7067512SJosef Bacik 
102d7067512SJosef Bacik 	/* Last time we adjusted the scale of everybody. */
103d7067512SJosef Bacik 	u64 last_scale_event;
104d7067512SJosef Bacik 
105d7067512SJosef Bacik 	/* The latency that we missed. */
106d7067512SJosef Bacik 	u64 scale_lat;
107d7067512SJosef Bacik 
108d7067512SJosef Bacik 	/* Total IOs from all of our children for the last summation. */
109d7067512SJosef Bacik 	u64 nr_samples;
110d7067512SJosef Bacik 
111d7067512SJosef Bacik 	/* The group that last changed the latency numbers. */
112d7067512SJosef Bacik 	struct iolatency_grp *scale_grp;
113d7067512SJosef Bacik 
114d7067512SJosef Bacik 	/* Cookie to tell if we need to scale up or down. */
115d7067512SJosef Bacik 	atomic_t scale_cookie;
116d7067512SJosef Bacik };
117d7067512SJosef Bacik 
1181fa2840eSJosef Bacik struct percentile_stats {
1191fa2840eSJosef Bacik 	u64 total;
1201fa2840eSJosef Bacik 	u64 missed;
1211fa2840eSJosef Bacik };
1221fa2840eSJosef Bacik 
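/*
 * SSDs track a missed/total pair so we can police a percentile of the
 * window; rotational devices track a full blk_rq_stat so we can use the
 * window's mean.
 */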
1231fa2840eSJosef Bacik struct latency_stat {
1241fa2840eSJosef Bacik 	union {
1251fa2840eSJosef Bacik 		struct percentile_stats ps;
1261fa2840eSJosef Bacik 		struct blk_rq_stat rqs;
1271fa2840eSJosef Bacik 	};
1281fa2840eSJosef Bacik };
1291fa2840eSJosef Bacik 
130d7067512SJosef Bacik struct iolatency_grp {
131d7067512SJosef Bacik 	struct blkg_policy_data pd;
1321fa2840eSJosef Bacik 	struct latency_stat __percpu *stats;
133451bb7c3SJosef Bacik 	struct latency_stat cur_stat;
134d7067512SJosef Bacik 	struct blk_iolatency *blkiolat;
135d7067512SJosef Bacik 	struct rq_depth rq_depth;
136d7067512SJosef Bacik 	struct rq_wait rq_wait;
137d7067512SJosef Bacik 	atomic64_t window_start;
138d7067512SJosef Bacik 	atomic_t scale_cookie;
139d7067512SJosef Bacik 	u64 min_lat_nsec;
140d7067512SJosef Bacik 	u64 cur_win_nsec;
141d7067512SJosef Bacik 
142d7067512SJosef Bacik 	/* total running average of our io latency. */
143c480bcf9SDennis Zhou (Facebook) 	u64 lat_avg;
144d7067512SJosef Bacik 
145d7067512SJosef Bacik 	/* Our current number of IOs for the last summation. */
146d7067512SJosef Bacik 	u64 nr_samples;
147d7067512SJosef Bacik 
1481fa2840eSJosef Bacik 	bool ssd;
149d7067512SJosef Bacik 	struct child_latency_info child_lat;
150d7067512SJosef Bacik };
151d7067512SJosef Bacik 
152c480bcf9SDennis Zhou (Facebook) #define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
153c480bcf9SDennis Zhou (Facebook) #define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
154c480bcf9SDennis Zhou (Facebook) /*
155c480bcf9SDennis Zhou (Facebook)  * These are the constants used to fake the fixed-point moving average
156c480bcf9SDennis Zhou (Facebook)  * calculation just like load average.  The call to CALC_LOAD folds
157c480bcf9SDennis Zhou (Facebook)  * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg.  The sampling
158c480bcf9SDennis Zhou (Facebook)  * window size is bucketed to try to approximately calculate average
159c480bcf9SDennis Zhou (Facebook)  * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
160c480bcf9SDennis Zhou (Facebook)  * elapse immediately.  Note, windows only elapse with IO activity.  Idle
161c480bcf9SDennis Zhou (Facebook)  * periods extend the most recent window.
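 *
 * Concretely, CALC_LOAD(lat_avg, exp, sample) computes
 * lat_avg = (lat_avg * exp + sample * (FIXED_1 - exp)) >> FSHIFT.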
162c480bcf9SDennis Zhou (Facebook)  */
163c480bcf9SDennis Zhou (Facebook) #define BLKIOLATENCY_NR_EXP_FACTORS 5
164c480bcf9SDennis Zhou (Facebook) #define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
165c480bcf9SDennis Zhou (Facebook) 				      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
166c480bcf9SDennis Zhou (Facebook) static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
167c480bcf9SDennis Zhou (Facebook) 	2045, // exp(1/600) - 600 samples
168c480bcf9SDennis Zhou (Facebook) 	2039, // exp(1/240) - 240 samples
169c480bcf9SDennis Zhou (Facebook) 	2031, // exp(1/120) - 120 samples
170c480bcf9SDennis Zhou (Facebook) 	2023, // exp(1/80)  - 80 samples
171c480bcf9SDennis Zhou (Facebook) 	2014, // exp(1/60)  - 60 samples
172c480bcf9SDennis Zhou (Facebook) };
173c480bcf9SDennis Zhou (Facebook) 
174d7067512SJosef Bacik static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
175d7067512SJosef Bacik {
176d7067512SJosef Bacik 	return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
177d7067512SJosef Bacik }
178d7067512SJosef Bacik 
179d7067512SJosef Bacik static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
180d7067512SJosef Bacik {
181d7067512SJosef Bacik 	return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
182d7067512SJosef Bacik }
183d7067512SJosef Bacik 
184d7067512SJosef Bacik static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
185d7067512SJosef Bacik {
186d7067512SJosef Bacik 	return pd_to_blkg(&iolat->pd);
187d7067512SJosef Bacik }
188d7067512SJosef Bacik 
1891fa2840eSJosef Bacik static inline void latency_stat_init(struct iolatency_grp *iolat,
1901fa2840eSJosef Bacik 				     struct latency_stat *stat)
1911fa2840eSJosef Bacik {
1921fa2840eSJosef Bacik 	if (iolat->ssd) {
1931fa2840eSJosef Bacik 		stat->ps.total = 0;
1941fa2840eSJosef Bacik 		stat->ps.missed = 0;
1951fa2840eSJosef Bacik 	} else
1961fa2840eSJosef Bacik 		blk_rq_stat_init(&stat->rqs);
1971fa2840eSJosef Bacik }
1981fa2840eSJosef Bacik 
1991fa2840eSJosef Bacik static inline void latency_stat_sum(struct iolatency_grp *iolat,
2001fa2840eSJosef Bacik 				    struct latency_stat *sum,
2011fa2840eSJosef Bacik 				    struct latency_stat *stat)
2021fa2840eSJosef Bacik {
2031fa2840eSJosef Bacik 	if (iolat->ssd) {
2041fa2840eSJosef Bacik 		sum->ps.total += stat->ps.total;
2051fa2840eSJosef Bacik 		sum->ps.missed += stat->ps.missed;
2061fa2840eSJosef Bacik 	} else
2071fa2840eSJosef Bacik 		blk_rq_stat_sum(&sum->rqs, &stat->rqs);
2081fa2840eSJosef Bacik }
2091fa2840eSJosef Bacik 
2101fa2840eSJosef Bacik static inline void latency_stat_record_time(struct iolatency_grp *iolat,
2111fa2840eSJosef Bacik 					    u64 req_time)
2121fa2840eSJosef Bacik {
2131fa2840eSJosef Bacik 	struct latency_stat *stat = get_cpu_ptr(iolat->stats);
2141fa2840eSJosef Bacik 	if (iolat->ssd) {
2151fa2840eSJosef Bacik 		if (req_time >= iolat->min_lat_nsec)
2161fa2840eSJosef Bacik 			stat->ps.missed++;
2171fa2840eSJosef Bacik 		stat->ps.total++;
2181fa2840eSJosef Bacik 	} else
2191fa2840eSJosef Bacik 		blk_rq_stat_add(&stat->rqs, req_time);
2201fa2840eSJosef Bacik 	put_cpu_ptr(stat);
2211fa2840eSJosef Bacik }
2221fa2840eSJosef Bacik 
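/*
 * A window is "ok" if we stayed within the target: for SSDs, fewer than 10%
 * (but at least one) of the IOs in the window missed min_lat_nsec; for
 * rotational devices, the window's mean latency stayed at or below it.
 */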
2231fa2840eSJosef Bacik static inline bool latency_sum_ok(struct iolatency_grp *iolat,
2241fa2840eSJosef Bacik 				  struct latency_stat *stat)
2251fa2840eSJosef Bacik {
2261fa2840eSJosef Bacik 	if (iolat->ssd) {
2271fa2840eSJosef Bacik 		u64 thresh = div64_u64(stat->ps.total, 10);
2281fa2840eSJosef Bacik 		thresh = max(thresh, 1ULL);
2291fa2840eSJosef Bacik 		return stat->ps.missed < thresh;
2301fa2840eSJosef Bacik 	}
2311fa2840eSJosef Bacik 	return stat->rqs.mean <= iolat->min_lat_nsec;
2321fa2840eSJosef Bacik }
2331fa2840eSJosef Bacik 
2341fa2840eSJosef Bacik static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
2351fa2840eSJosef Bacik 				       struct latency_stat *stat)
2361fa2840eSJosef Bacik {
2371fa2840eSJosef Bacik 	if (iolat->ssd)
2381fa2840eSJosef Bacik 		return stat->ps.total;
2391fa2840eSJosef Bacik 	return stat->rqs.nr_samples;
2401fa2840eSJosef Bacik }
2411fa2840eSJosef Bacik 
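/*
 * Fold this window's mean latency into the decaying running average.  Only
 * meaningful for rotational devices; SSDs use the percentile stats instead.
 */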
2421fa2840eSJosef Bacik static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
2431fa2840eSJosef Bacik 					      struct latency_stat *stat)
2441fa2840eSJosef Bacik {
2451fa2840eSJosef Bacik 	int exp_idx;
2461fa2840eSJosef Bacik 
2471fa2840eSJosef Bacik 	if (iolat->ssd)
2481fa2840eSJosef Bacik 		return;
2491fa2840eSJosef Bacik 
2501fa2840eSJosef Bacik 	/*
2511fa2840eSJosef Bacik 	 * CALC_LOAD takes in a number stored in fixed point representation.
2521fa2840eSJosef Bacik 	 * Because we are using this for IO time in ns, the values stored
2531fa2840eSJosef Bacik 	 * are significantly larger than the FIXED_1 denominator (2048).
2541fa2840eSJosef Bacik 	 * Therefore, rounding errors in the calculation are negligible and
2551fa2840eSJosef Bacik 	 * can be ignored.
2561fa2840eSJosef Bacik 	 */
2571fa2840eSJosef Bacik 	exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
2581fa2840eSJosef Bacik 			div64_u64(iolat->cur_win_nsec,
2591fa2840eSJosef Bacik 				  BLKIOLATENCY_EXP_BUCKET_SIZE));
2601fa2840eSJosef Bacik 	CALC_LOAD(iolat->lat_avg, iolatency_exp_factors[exp_idx], stat->rqs.mean);
2611fa2840eSJosef Bacik }
2621fa2840eSJosef Bacik 
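/*
 * Take a queue slot, but only if nobody is already waiting ahead of us; the
 * first_block check keeps a fresh submitter from jumping past tasks that
 * are still on the waitqueue.
 */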
263d7067512SJosef Bacik static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
264d7067512SJosef Bacik 				       wait_queue_entry_t *wait,
265d7067512SJosef Bacik 				       bool first_block)
266d7067512SJosef Bacik {
267d7067512SJosef Bacik 	struct rq_wait *rqw = &iolat->rq_wait;
268d7067512SJosef Bacik 
269d7067512SJosef Bacik 	if (first_block && waitqueue_active(&rqw->wait) &&
270d7067512SJosef Bacik 	    rqw->wait.head.next != &wait->entry)
271d7067512SJosef Bacik 		return false;
272d7067512SJosef Bacik 	return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
273d7067512SJosef Bacik }
274d7067512SJosef Bacik 
275d7067512SJosef Bacik static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
276d7067512SJosef Bacik 				       struct iolatency_grp *iolat,
277d7067512SJosef Bacik 				       spinlock_t *lock, bool issue_as_root,
278d7067512SJosef Bacik 				       bool use_memdelay)
279d7067512SJosef Bacik 	__releases(lock)
280d7067512SJosef Bacik 	__acquires(lock)
281d7067512SJosef Bacik {
282d7067512SJosef Bacik 	struct rq_wait *rqw = &iolat->rq_wait;
283d7067512SJosef Bacik 	unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
284d7067512SJosef Bacik 	DEFINE_WAIT(wait);
285d7067512SJosef Bacik 	bool first_block = true;
286d7067512SJosef Bacik 
287d7067512SJosef Bacik 	if (use_delay)
288d7067512SJosef Bacik 		blkcg_schedule_throttle(rqos->q, use_memdelay);
289d7067512SJosef Bacik 
290d7067512SJosef Bacik 	/*
291d7067512SJosef Bacik 	 * To avoid priority inversions we want to just take a slot if we are
292d7067512SJosef Bacik 	 * issuing as root.  If we're being killed off there's no point in
293d7067512SJosef Bacik 	 * delaying things, we may have been killed by OOM so throttling may
294d7067512SJosef Bacik 	 * make recovery take even longer, so just let the IO's through so the
295d7067512SJosef Bacik 	 * task can go away.
296d7067512SJosef Bacik 	 */
297d7067512SJosef Bacik 	if (issue_as_root || fatal_signal_pending(current)) {
298d7067512SJosef Bacik 		atomic_inc(&rqw->inflight);
299d7067512SJosef Bacik 		return;
300d7067512SJosef Bacik 	}
301d7067512SJosef Bacik 
302d7067512SJosef Bacik 	if (iolatency_may_queue(iolat, &wait, first_block))
303d7067512SJosef Bacik 		return;
304d7067512SJosef Bacik 
305d7067512SJosef Bacik 	do {
306d7067512SJosef Bacik 		prepare_to_wait_exclusive(&rqw->wait, &wait,
307d7067512SJosef Bacik 					  TASK_UNINTERRUPTIBLE);
308d7067512SJosef Bacik 
309d7067512SJosef Bacik 		if (iolatency_may_queue(iolat, &wait, first_block))
310d7067512SJosef Bacik 			break;
311d7067512SJosef Bacik 		first_block = false;
312d7067512SJosef Bacik 
313d7067512SJosef Bacik 		if (lock) {
314d7067512SJosef Bacik 			spin_unlock_irq(lock);
315d7067512SJosef Bacik 			io_schedule();
316d7067512SJosef Bacik 			spin_lock_irq(lock);
317d7067512SJosef Bacik 		} else {
318d7067512SJosef Bacik 			io_schedule();
319d7067512SJosef Bacik 		}
320d7067512SJosef Bacik 	} while (1);
321d7067512SJosef Bacik 
322d7067512SJosef Bacik 	finish_wait(&rqw->wait, &wait);
323d7067512SJosef Bacik }
324d7067512SJosef Bacik 
325d7067512SJosef Bacik #define SCALE_DOWN_FACTOR 2
326d7067512SJosef Bacik #define SCALE_UP_FACTOR 4
327d7067512SJosef Bacik 
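/*
 * Step size for a scaling event: 1/16th of the queue depth when scaling up,
 * 1/4th when scaling down, never less than 1.
 */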
328d7067512SJosef Bacik static inline unsigned long scale_amount(unsigned long qd, bool up)
329d7067512SJosef Bacik {
330d7067512SJosef Bacik 	return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
331d7067512SJosef Bacik }
332d7067512SJosef Bacik 
333d7067512SJosef Bacik /*
334d7067512SJosef Bacik  * We scale the qd down faster than we scale up, so we need to use this helper
335d7067512SJosef Bacik  * to adjust the scale_cookie accordingly so we don't prematurely get
336d7067512SJosef Bacik  * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
337d7067512SJosef Bacik  *
338d7067512SJosef Bacik  * Each group has their own local copy of the last scale cookie they saw, so if
339d7067512SJosef Bacik  * the global scale cookie goes up or down they know which way they need to go
340d7067512SJosef Bacik  * based on their last knowledge of it.
341d7067512SJosef Bacik  */
342d7067512SJosef Bacik static void scale_cookie_change(struct blk_iolatency *blkiolat,
343d7067512SJosef Bacik 				struct child_latency_info *lat_info,
344d7067512SJosef Bacik 				bool up)
345d7067512SJosef Bacik {
346ff4cee08SJosef Bacik 	unsigned long qd = blkiolat->rqos.q->nr_requests;
347d7067512SJosef Bacik 	unsigned long scale = scale_amount(qd, up);
348d7067512SJosef Bacik 	unsigned long old = atomic_read(&lat_info->scale_cookie);
349d7067512SJosef Bacik 	unsigned long max_scale = qd << 1;
350d7067512SJosef Bacik 	unsigned long diff = 0;
351d7067512SJosef Bacik 
352d7067512SJosef Bacik 	if (old < DEFAULT_SCALE_COOKIE)
353d7067512SJosef Bacik 		diff = DEFAULT_SCALE_COOKIE - old;
354d7067512SJosef Bacik 
355d7067512SJosef Bacik 	if (up) {
356d7067512SJosef Bacik 		if (scale + old > DEFAULT_SCALE_COOKIE)
357d7067512SJosef Bacik 			atomic_set(&lat_info->scale_cookie,
358d7067512SJosef Bacik 				   DEFAULT_SCALE_COOKIE);
359d7067512SJosef Bacik 		else if (diff > qd)
360d7067512SJosef Bacik 			atomic_inc(&lat_info->scale_cookie);
361d7067512SJosef Bacik 		else
362d7067512SJosef Bacik 			atomic_add(scale, &lat_info->scale_cookie);
363d7067512SJosef Bacik 	} else {
364d7067512SJosef Bacik 		/*
365d7067512SJosef Bacik 		 * We don't want to dig a hole so deep that it takes us hours to
366d7067512SJosef Bacik 		 * dig out of it.  Just enough that we don't throttle/unthrottle
367d7067512SJosef Bacik 		 * with jagged workloads but can still unthrottle once pressure
368d7067512SJosef Bacik 		 * has sufficiently dissipated.
369d7067512SJosef Bacik 		 */
370d7067512SJosef Bacik 		if (diff > qd) {
371d7067512SJosef Bacik 			if (diff < max_scale)
372d7067512SJosef Bacik 				atomic_dec(&lat_info->scale_cookie);
373d7067512SJosef Bacik 		} else {
374d7067512SJosef Bacik 			atomic_sub(scale, &lat_info->scale_cookie);
375d7067512SJosef Bacik 		}
376d7067512SJosef Bacik 	}
377d7067512SJosef Bacik }
378d7067512SJosef Bacik 
379d7067512SJosef Bacik /*
380d7067512SJosef Bacik  * Change the queue depth of the iolatency_grp.  We add 1/16th of the queue
381d7067512SJosef Bacik  * depth at a time when scaling up, and halve it when scaling down, so we
382d7067512SJosef Bacik  * avoid wild swings and hopefully dial in a fairer share of the total depth.
383d7067512SJosef Bacik  */
384d7067512SJosef Bacik static void scale_change(struct iolatency_grp *iolat, bool up)
385d7067512SJosef Bacik {
386ff4cee08SJosef Bacik 	unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
387d7067512SJosef Bacik 	unsigned long scale = scale_amount(qd, up);
388d7067512SJosef Bacik 	unsigned long old = iolat->rq_depth.max_depth;
389d7067512SJosef Bacik 
390d7067512SJosef Bacik 	if (old > qd)
391d7067512SJosef Bacik 		old = qd;
392d7067512SJosef Bacik 
393d7067512SJosef Bacik 	if (up) {
394d7067512SJosef Bacik 		if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
395d7067512SJosef Bacik 			return;
396d7067512SJosef Bacik 
397d7067512SJosef Bacik 		if (old < qd) {
398d7067512SJosef Bacik 			old += scale;
399d7067512SJosef Bacik 			old = min(old, qd);
400d7067512SJosef Bacik 			iolat->rq_depth.max_depth = old;
401d7067512SJosef Bacik 			wake_up_all(&iolat->rq_wait.wait);
402d7067512SJosef Bacik 		}
4039f60511aSJosef Bacik 	} else {
404d7067512SJosef Bacik 		old >>= 1;
405d7067512SJosef Bacik 		iolat->rq_depth.max_depth = max(old, 1UL);
406d7067512SJosef Bacik 	}
407d7067512SJosef Bacik }
408d7067512SJosef Bacik 
409d7067512SJosef Bacik /* Check our parent and see if the scale cookie has changed. */
410d7067512SJosef Bacik static void check_scale_change(struct iolatency_grp *iolat)
411d7067512SJosef Bacik {
412d7067512SJosef Bacik 	struct iolatency_grp *parent;
413d7067512SJosef Bacik 	struct child_latency_info *lat_info;
414d7067512SJosef Bacik 	unsigned int cur_cookie;
415d7067512SJosef Bacik 	unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
416d7067512SJosef Bacik 	u64 scale_lat;
417d7067512SJosef Bacik 	unsigned int old;
418d7067512SJosef Bacik 	int direction = 0;
419d7067512SJosef Bacik 
420d7067512SJosef Bacik 	if (lat_to_blkg(iolat)->parent == NULL)
421d7067512SJosef Bacik 		return;
422d7067512SJosef Bacik 
423d7067512SJosef Bacik 	parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
424d7067512SJosef Bacik 	if (!parent)
425d7067512SJosef Bacik 		return;
426d7067512SJosef Bacik 
427d7067512SJosef Bacik 	lat_info = &parent->child_lat;
428d7067512SJosef Bacik 	cur_cookie = atomic_read(&lat_info->scale_cookie);
429d7067512SJosef Bacik 	scale_lat = READ_ONCE(lat_info->scale_lat);
430d7067512SJosef Bacik 
431d7067512SJosef Bacik 	if (cur_cookie < our_cookie)
432d7067512SJosef Bacik 		direction = -1;
433d7067512SJosef Bacik 	else if (cur_cookie > our_cookie)
434d7067512SJosef Bacik 		direction = 1;
435d7067512SJosef Bacik 	else
436d7067512SJosef Bacik 		return;
437d7067512SJosef Bacik 
438d7067512SJosef Bacik 	old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);
439d7067512SJosef Bacik 
440d7067512SJosef Bacik 	/* Somebody beat us to the punch, just bail. */
441d7067512SJosef Bacik 	if (old != our_cookie)
442d7067512SJosef Bacik 		return;
443d7067512SJosef Bacik 
444d7067512SJosef Bacik 	if (direction < 0 && iolat->min_lat_nsec) {
445d7067512SJosef Bacik 		u64 samples_thresh;
446d7067512SJosef Bacik 
447d7067512SJosef Bacik 		if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
448d7067512SJosef Bacik 			return;
449d7067512SJosef Bacik 
450d7067512SJosef Bacik 		/*
451d7067512SJosef Bacik 		 * Sometimes high priority groups are their own worst enemy, so
452d7067512SJosef Bacik 		 * instead of taking it out on some poor other group that did 5%
453d7067512SJosef Bacik 		 * or less of the IOs for the last summation just skip this
454d7067512SJosef Bacik 		 * scale down event.
455d7067512SJosef Bacik 		 */
456d7067512SJosef Bacik 		samples_thresh = lat_info->nr_samples * 5;
45722ed8a93SJosef Bacik 		samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
458d7067512SJosef Bacik 		if (iolat->nr_samples <= samples_thresh)
459d7067512SJosef Bacik 			return;
460d7067512SJosef Bacik 	}
461d7067512SJosef Bacik 
462d7067512SJosef Bacik 	/* We're as low as we can go. */
463d7067512SJosef Bacik 	if (iolat->rq_depth.max_depth == 1 && direction < 0) {
464d7067512SJosef Bacik 		blkcg_use_delay(lat_to_blkg(iolat));
465d7067512SJosef Bacik 		return;
466d7067512SJosef Bacik 	}
467d7067512SJosef Bacik 
468d7067512SJosef Bacik 	/* We're back to the default cookie, unthrottle all the things. */
469d7067512SJosef Bacik 	if (cur_cookie == DEFAULT_SCALE_COOKIE) {
470d7067512SJosef Bacik 		blkcg_clear_delay(lat_to_blkg(iolat));
471a284390bSJosef Bacik 		iolat->rq_depth.max_depth = UINT_MAX;
472d7067512SJosef Bacik 		wake_up_all(&iolat->rq_wait.wait);
473d7067512SJosef Bacik 		return;
474d7067512SJosef Bacik 	}
475d7067512SJosef Bacik 
476d7067512SJosef Bacik 	scale_change(iolat, direction > 0);
477d7067512SJosef Bacik }
478d7067512SJosef Bacik 
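/*
 * Submission hook: walk from the bio's blkg up to the root, reacting to any
 * scale cookie change and taking a queue slot (or sleeping) at each level.
 */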
479d7067512SJosef Bacik static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
480d7067512SJosef Bacik 				     spinlock_t *lock)
481d7067512SJosef Bacik {
482d7067512SJosef Bacik 	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
483a7b39b4eSDennis Zhou (Facebook) 	struct blkcg_gq *blkg = bio->bi_blkg;
484d7067512SJosef Bacik 	bool issue_as_root = bio_issue_as_root_blkg(bio);
485d7067512SJosef Bacik 
486d7067512SJosef Bacik 	if (!blk_iolatency_enabled(blkiolat))
487d7067512SJosef Bacik 		return;
488d7067512SJosef Bacik 
489d7067512SJosef Bacik 	while (blkg && blkg->parent) {
490d7067512SJosef Bacik 		struct iolatency_grp *iolat = blkg_to_lat(blkg);
491d7067512SJosef Bacik 		if (!iolat) {
492d7067512SJosef Bacik 			blkg = blkg->parent;
493d7067512SJosef Bacik 			continue;
494d7067512SJosef Bacik 		}
495d7067512SJosef Bacik 
496d7067512SJosef Bacik 		check_scale_change(iolat);
497d7067512SJosef Bacik 		__blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
498d7067512SJosef Bacik 				     (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
499d7067512SJosef Bacik 		blkg = blkg->parent;
500d7067512SJosef Bacik 	}
501d7067512SJosef Bacik 	if (!timer_pending(&blkiolat->timer))
502d7067512SJosef Bacik 		mod_timer(&blkiolat->timer, jiffies + HZ);
503d7067512SJosef Bacik }
504d7067512SJosef Bacik 
505d7067512SJosef Bacik static void iolatency_record_time(struct iolatency_grp *iolat,
506d7067512SJosef Bacik 				  struct bio_issue *issue, u64 now,
507d7067512SJosef Bacik 				  bool issue_as_root)
508d7067512SJosef Bacik {
509d7067512SJosef Bacik 	u64 start = bio_issue_time(issue);
510d7067512SJosef Bacik 	u64 req_time;
511d7067512SJosef Bacik 
51271e9690bSJosef Bacik 	/*
51371e9690bSJosef Bacik 	 * Truncate "now" the same way the bio's issue time was truncated so
51471e9690bSJosef Bacik 	 * the two timestamps are directly comparable.
51571e9690bSJosef Bacik 	 */
51671e9690bSJosef Bacik 	now = __bio_issue_time(now);
51771e9690bSJosef Bacik 
518d7067512SJosef Bacik 	if (now <= start)
519d7067512SJosef Bacik 		return;
520d7067512SJosef Bacik 
521d7067512SJosef Bacik 	req_time = now - start;
522d7067512SJosef Bacik 
523d7067512SJosef Bacik 	/*
524d7067512SJosef Bacik 	 * We don't want to count issue_as_root bio's in the cgroups latency
525d7067512SJosef Bacik 	 * statistics as it could skew the numbers downwards.
526d7067512SJosef Bacik 	 */
527a284390bSJosef Bacik 	if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
528d7067512SJosef Bacik 		u64 sub = iolat->min_lat_nsec;
529d7067512SJosef Bacik 		if (req_time < sub)
530d7067512SJosef Bacik 			blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
531d7067512SJosef Bacik 		return;
532d7067512SJosef Bacik 	}
533d7067512SJosef Bacik 
5341fa2840eSJosef Bacik 	latency_stat_record_time(iolat, req_time);
535d7067512SJosef Bacik }
536d7067512SJosef Bacik 
537d7067512SJosef Bacik #define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
538d7067512SJosef Bacik #define BLKIOLATENCY_MIN_GOOD_SAMPLES 5
539d7067512SJosef Bacik 
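/*
 * Called when a window elapses: fold the percpu stats into a window sum,
 * update the running average, then take the parent's lock and decide
 * whether this group should trigger a scale up or scale down event.
 */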
540d7067512SJosef Bacik static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
541d7067512SJosef Bacik {
542d7067512SJosef Bacik 	struct blkcg_gq *blkg = lat_to_blkg(iolat);
543d7067512SJosef Bacik 	struct iolatency_grp *parent;
544d7067512SJosef Bacik 	struct child_latency_info *lat_info;
5451fa2840eSJosef Bacik 	struct latency_stat stat;
546d7067512SJosef Bacik 	unsigned long flags;
5471fa2840eSJosef Bacik 	int cpu;
548d7067512SJosef Bacik 
5491fa2840eSJosef Bacik 	latency_stat_init(iolat, &stat);
550d7067512SJosef Bacik 	preempt_disable();
551d7067512SJosef Bacik 	for_each_online_cpu(cpu) {
5521fa2840eSJosef Bacik 		struct latency_stat *s;
553d7067512SJosef Bacik 		s = per_cpu_ptr(iolat->stats, cpu);
5541fa2840eSJosef Bacik 		latency_stat_sum(iolat, &stat, s);
5551fa2840eSJosef Bacik 		latency_stat_init(iolat, s);
556d7067512SJosef Bacik 	}
557d7067512SJosef Bacik 	preempt_enable();
558d7067512SJosef Bacik 
559d7067512SJosef Bacik 	parent = blkg_to_lat(blkg->parent);
560d7067512SJosef Bacik 	if (!parent)
561d7067512SJosef Bacik 		return;
562d7067512SJosef Bacik 
563d7067512SJosef Bacik 	lat_info = &parent->child_lat;
564d7067512SJosef Bacik 
5651fa2840eSJosef Bacik 	iolat_update_total_lat_avg(iolat, &stat);
566d7067512SJosef Bacik 
567d7067512SJosef Bacik 	/* Everything is ok and we don't need to adjust the scale. */
5681fa2840eSJosef Bacik 	if (latency_sum_ok(iolat, &stat) &&
569d7067512SJosef Bacik 	    atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
570d7067512SJosef Bacik 		return;
571d7067512SJosef Bacik 
572d7067512SJosef Bacik 	/* Somebody beat us to the punch, just bail. */
573d7067512SJosef Bacik 	spin_lock_irqsave(&lat_info->lock, flags);
574451bb7c3SJosef Bacik 
575451bb7c3SJosef Bacik 	latency_stat_sum(iolat, &iolat->cur_stat, &stat);
576d7067512SJosef Bacik 	lat_info->nr_samples -= iolat->nr_samples;
577451bb7c3SJosef Bacik 	lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
578451bb7c3SJosef Bacik 	iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);
579d7067512SJosef Bacik 
580d7067512SJosef Bacik 	if (lat_info->last_scale_event >= now ||
581451bb7c3SJosef Bacik 	    now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME)
582d7067512SJosef Bacik 		goto out;
583d7067512SJosef Bacik 
584451bb7c3SJosef Bacik 	if (latency_sum_ok(iolat, &iolat->cur_stat) &&
585451bb7c3SJosef Bacik 	    latency_sum_ok(iolat, &stat)) {
586451bb7c3SJosef Bacik 		if (latency_stat_samples(iolat, &iolat->cur_stat) <
5871fa2840eSJosef Bacik 		    BLKIOLATENCY_MIN_GOOD_SAMPLES)
5881fa2840eSJosef Bacik 			goto out;
589d7067512SJosef Bacik 		if (lat_info->scale_grp == iolat) {
590d7067512SJosef Bacik 			lat_info->last_scale_event = now;
591d7067512SJosef Bacik 			scale_cookie_change(iolat->blkiolat, lat_info, true);
592d7067512SJosef Bacik 		}
593451bb7c3SJosef Bacik 	} else if (lat_info->scale_lat == 0 ||
594451bb7c3SJosef Bacik 		   lat_info->scale_lat >= iolat->min_lat_nsec) {
595d7067512SJosef Bacik 		lat_info->last_scale_event = now;
596d7067512SJosef Bacik 		if (!lat_info->scale_grp ||
597d7067512SJosef Bacik 		    lat_info->scale_lat > iolat->min_lat_nsec) {
598d7067512SJosef Bacik 			WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
599d7067512SJosef Bacik 			lat_info->scale_grp = iolat;
600d7067512SJosef Bacik 		}
601d7067512SJosef Bacik 		scale_cookie_change(iolat->blkiolat, lat_info, false);
602d7067512SJosef Bacik 	}
603451bb7c3SJosef Bacik 	latency_stat_init(iolat, &iolat->cur_stat);
604d7067512SJosef Bacik out:
605d7067512SJosef Bacik 	spin_unlock_irqrestore(&lat_info->lock, flags);
606d7067512SJosef Bacik }
607d7067512SJosef Bacik 
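/*
 * Completion hook: walk from the bio's blkg up to the root, dropping the
 * inflight count at each level, recording the latency, and closing out the
 * window if it has elapsed.
 */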
608d7067512SJosef Bacik static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
609d7067512SJosef Bacik {
610d7067512SJosef Bacik 	struct blkcg_gq *blkg;
611d7067512SJosef Bacik 	struct rq_wait *rqw;
612d7067512SJosef Bacik 	struct iolatency_grp *iolat;
613d7067512SJosef Bacik 	u64 window_start;
614d7067512SJosef Bacik 	u64 now = ktime_to_ns(ktime_get());
615d7067512SJosef Bacik 	bool issue_as_root = bio_issue_as_root_blkg(bio);
616d7067512SJosef Bacik 	bool enabled = false;
617d7067512SJosef Bacik 
618d7067512SJosef Bacik 	blkg = bio->bi_blkg;
619d7067512SJosef Bacik 	if (!blkg)
620d7067512SJosef Bacik 		return;
621d7067512SJosef Bacik 
622d7067512SJosef Bacik 	iolat = blkg_to_lat(bio->bi_blkg);
623d7067512SJosef Bacik 	if (!iolat)
624d7067512SJosef Bacik 		return;
625d7067512SJosef Bacik 
626d7067512SJosef Bacik 	enabled = blk_iolatency_enabled(iolat->blkiolat);
627d7067512SJosef Bacik 	while (blkg && blkg->parent) {
628d7067512SJosef Bacik 		iolat = blkg_to_lat(blkg);
629d7067512SJosef Bacik 		if (!iolat) {
630d7067512SJosef Bacik 			blkg = blkg->parent;
631d7067512SJosef Bacik 			continue;
632d7067512SJosef Bacik 		}
633d7067512SJosef Bacik 		rqw = &iolat->rq_wait;
634d7067512SJosef Bacik 
635d7067512SJosef Bacik 		atomic_dec(&rqw->inflight);
636d7067512SJosef Bacik 		if (!enabled || iolat->min_lat_nsec == 0)
637d7067512SJosef Bacik 			goto next;
638d7067512SJosef Bacik 		iolatency_record_time(iolat, &bio->bi_issue, now,
639d7067512SJosef Bacik 				      issue_as_root);
640d7067512SJosef Bacik 		window_start = atomic64_read(&iolat->window_start);
641d7067512SJosef Bacik 		if (now > window_start &&
642d7067512SJosef Bacik 		    (now - window_start) >= iolat->cur_win_nsec) {
643d7067512SJosef Bacik 			if (atomic64_cmpxchg(&iolat->window_start,
644d7067512SJosef Bacik 					window_start, now) == window_start)
645d7067512SJosef Bacik 				iolatency_check_latencies(iolat, now);
646d7067512SJosef Bacik 		}
647d7067512SJosef Bacik next:
648d7067512SJosef Bacik 		wake_up(&rqw->wait);
649d7067512SJosef Bacik 		blkg = blkg->parent;
650d7067512SJosef Bacik 	}
651d7067512SJosef Bacik }
652d7067512SJosef Bacik 
653d7067512SJosef Bacik static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
654d7067512SJosef Bacik {
655d7067512SJosef Bacik 	struct blkcg_gq *blkg;
656d7067512SJosef Bacik 
657d7067512SJosef Bacik 	blkg = bio->bi_blkg;
658d7067512SJosef Bacik 	while (blkg && blkg->parent) {
659d7067512SJosef Bacik 		struct rq_wait *rqw;
660d7067512SJosef Bacik 		struct iolatency_grp *iolat;
661d7067512SJosef Bacik 
662d7067512SJosef Bacik 		iolat = blkg_to_lat(blkg);
663d7067512SJosef Bacik 		if (!iolat)
664d7067512SJosef Bacik 			goto next;
665d7067512SJosef Bacik 
666d7067512SJosef Bacik 		rqw = &iolat->rq_wait;
667d7067512SJosef Bacik 		atomic_dec(&rqw->inflight);
668d7067512SJosef Bacik 		wake_up(&rqw->wait);
669d7067512SJosef Bacik next:
670d7067512SJosef Bacik 		blkg = blkg->parent;
671d7067512SJosef Bacik 	}
672d7067512SJosef Bacik }
673d7067512SJosef Bacik 
674d7067512SJosef Bacik static void blkcg_iolatency_exit(struct rq_qos *rqos)
675d7067512SJosef Bacik {
676d7067512SJosef Bacik 	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
677d7067512SJosef Bacik 
678d7067512SJosef Bacik 	del_timer_sync(&blkiolat->timer);
679d7067512SJosef Bacik 	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
680d7067512SJosef Bacik 	kfree(blkiolat);
681d7067512SJosef Bacik }
682d7067512SJosef Bacik 
683d7067512SJosef Bacik static struct rq_qos_ops blkcg_iolatency_ops = {
684d7067512SJosef Bacik 	.throttle = blkcg_iolatency_throttle,
685d7067512SJosef Bacik 	.cleanup = blkcg_iolatency_cleanup,
686d7067512SJosef Bacik 	.done_bio = blkcg_iolatency_done_bio,
687d7067512SJosef Bacik 	.exit = blkcg_iolatency_exit,
688d7067512SJosef Bacik };
689d7067512SJosef Bacik 
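/*
 * Periodic sweep (re-armed from the throttle path) over all groups: scale
 * back up when a scaled-down subtree has a stale cookie but no scale_grp,
 * and clear scale_grp pointers that haven't seen an event in 5 seconds.
 */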
690d7067512SJosef Bacik static void blkiolatency_timer_fn(struct timer_list *t)
691d7067512SJosef Bacik {
692d7067512SJosef Bacik 	struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
693d7067512SJosef Bacik 	struct blkcg_gq *blkg;
694d7067512SJosef Bacik 	struct cgroup_subsys_state *pos_css;
695d7067512SJosef Bacik 	u64 now = ktime_to_ns(ktime_get());
696d7067512SJosef Bacik 
697d7067512SJosef Bacik 	rcu_read_lock();
698d7067512SJosef Bacik 	blkg_for_each_descendant_pre(blkg, pos_css,
699d7067512SJosef Bacik 				     blkiolat->rqos.q->root_blkg) {
700d7067512SJosef Bacik 		struct iolatency_grp *iolat;
701d7067512SJosef Bacik 		struct child_latency_info *lat_info;
702d7067512SJosef Bacik 		unsigned long flags;
703d7067512SJosef Bacik 		u64 cookie;
704d7067512SJosef Bacik 
705d7067512SJosef Bacik 		/*
706d7067512SJosef Bacik 		 * We could be exiting, don't access the pd unless we have a
707d7067512SJosef Bacik 		 * ref on the blkg.
708d7067512SJosef Bacik 		 */
709101246ecSDennis Zhou (Facebook) 		if (!blkg_tryget(blkg))
710d7067512SJosef Bacik 			continue;
711d7067512SJosef Bacik 
712d7067512SJosef Bacik 		iolat = blkg_to_lat(blkg);
713d7067512SJosef Bacik 		if (!iolat)
71452a1199cSJosef Bacik 			goto next;
715d7067512SJosef Bacik 
716d7067512SJosef Bacik 		lat_info = &iolat->child_lat;
717d7067512SJosef Bacik 		cookie = atomic_read(&lat_info->scale_cookie);
718d7067512SJosef Bacik 
719d7067512SJosef Bacik 		if (cookie >= DEFAULT_SCALE_COOKIE)
720d7067512SJosef Bacik 			goto next;
721d7067512SJosef Bacik 
722d7067512SJosef Bacik 		spin_lock_irqsave(&lat_info->lock, flags);
723d7067512SJosef Bacik 		if (lat_info->last_scale_event >= now)
724d7067512SJosef Bacik 			goto next_lock;
725d7067512SJosef Bacik 
726d7067512SJosef Bacik 		/*
727d7067512SJosef Bacik 		 * We scaled down but don't have a scale_grp, scale up and carry
728d7067512SJosef Bacik 		 * on.
729d7067512SJosef Bacik 		 */
730d7067512SJosef Bacik 		if (lat_info->scale_grp == NULL) {
731d7067512SJosef Bacik 			scale_cookie_change(iolat->blkiolat, lat_info, true);
732d7067512SJosef Bacik 			goto next_lock;
733d7067512SJosef Bacik 		}
734d7067512SJosef Bacik 
735d7067512SJosef Bacik 		/*
736d7067512SJosef Bacik 		 * It's been 5 seconds since our last scale event, clear the
737d7067512SJosef Bacik 		 * scale grp in case the group that needed the scale down isn't
738d7067512SJosef Bacik 		 * doing any IO currently.
739d7067512SJosef Bacik 		 */
740d7067512SJosef Bacik 		if (now - lat_info->last_scale_event >=
741d7067512SJosef Bacik 		    ((u64)NSEC_PER_SEC * 5))
742d7067512SJosef Bacik 			lat_info->scale_grp = NULL;
743d7067512SJosef Bacik next_lock:
744d7067512SJosef Bacik 		spin_unlock_irqrestore(&lat_info->lock, flags);
745d7067512SJosef Bacik next:
746d7067512SJosef Bacik 		blkg_put(blkg);
747d7067512SJosef Bacik 	}
748d7067512SJosef Bacik 	rcu_read_unlock();
749d7067512SJosef Bacik }
750d7067512SJosef Bacik 
751d7067512SJosef Bacik int blk_iolatency_init(struct request_queue *q)
752d7067512SJosef Bacik {
753d7067512SJosef Bacik 	struct blk_iolatency *blkiolat;
754d7067512SJosef Bacik 	struct rq_qos *rqos;
755d7067512SJosef Bacik 	int ret;
756d7067512SJosef Bacik 
757d7067512SJosef Bacik 	blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
758d7067512SJosef Bacik 	if (!blkiolat)
759d7067512SJosef Bacik 		return -ENOMEM;
760d7067512SJosef Bacik 
761d7067512SJosef Bacik 	rqos = &blkiolat->rqos;
762d7067512SJosef Bacik 	rqos->id = RQ_QOS_CGROUP;
763d7067512SJosef Bacik 	rqos->ops = &blkcg_iolatency_ops;
764d7067512SJosef Bacik 	rqos->q = q;
765d7067512SJosef Bacik 
766d7067512SJosef Bacik 	rq_qos_add(q, rqos);
767d7067512SJosef Bacik 
768d7067512SJosef Bacik 	ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
769d7067512SJosef Bacik 	if (ret) {
770d7067512SJosef Bacik 		rq_qos_del(q, rqos);
771d7067512SJosef Bacik 		kfree(blkiolat);
772d7067512SJosef Bacik 		return ret;
773d7067512SJosef Bacik 	}
774d7067512SJosef Bacik 
775d7067512SJosef Bacik 	timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
776d7067512SJosef Bacik 
777d7067512SJosef Bacik 	return 0;
778d7067512SJosef Bacik }
779d7067512SJosef Bacik 
780d7067512SJosef Bacik static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
781d7067512SJosef Bacik {
782d7067512SJosef Bacik 	struct iolatency_grp *iolat = blkg_to_lat(blkg);
783d7067512SJosef Bacik 	struct blk_iolatency *blkiolat = iolat->blkiolat;
784d7067512SJosef Bacik 	u64 oldval = iolat->min_lat_nsec;
785d7067512SJosef Bacik 
786d7067512SJosef Bacik 	iolat->min_lat_nsec = val;
787c480bcf9SDennis Zhou (Facebook) 	iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
788c480bcf9SDennis Zhou (Facebook) 	iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
789c480bcf9SDennis Zhou (Facebook) 				    BLKIOLATENCY_MAX_WIN_SIZE);
790d7067512SJosef Bacik 
791d7067512SJosef Bacik 	if (!oldval && val)
792d7067512SJosef Bacik 		atomic_inc(&blkiolat->enabled);
793d7067512SJosef Bacik 	if (oldval && !val)
794d7067512SJosef Bacik 		atomic_dec(&blkiolat->enabled);
795d7067512SJosef Bacik }
796d7067512SJosef Bacik 
797d7067512SJosef Bacik static void iolatency_clear_scaling(struct blkcg_gq *blkg)
798d7067512SJosef Bacik {
799d7067512SJosef Bacik 	if (blkg->parent) {
800d7067512SJosef Bacik 		struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
801d7067512SJosef Bacik 		struct child_latency_info *lat_info;
802d7067512SJosef Bacik 		if (!iolat)
803d7067512SJosef Bacik 			return;
804d7067512SJosef Bacik 
805d7067512SJosef Bacik 		lat_info = &iolat->child_lat;
806d7067512SJosef Bacik 		spin_lock(&lat_info->lock);
807d7067512SJosef Bacik 		atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
808d7067512SJosef Bacik 		lat_info->last_scale_event = 0;
809d7067512SJosef Bacik 		lat_info->scale_grp = NULL;
810d7067512SJosef Bacik 		lat_info->scale_lat = 0;
811d7067512SJosef Bacik 		spin_unlock(&lat_info->lock);
812d7067512SJosef Bacik 	}
813d7067512SJosef Bacik }
814d7067512SJosef Bacik 
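/*
 * Parse writes to io.latency.  The expected format (per the cgroup-v2
 * documentation) is "MAJOR:MINOR target=<usec>", e.g. "8:16 target=750";
 * "target=max" clears the target.
 */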
815d7067512SJosef Bacik static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
816d7067512SJosef Bacik 			     size_t nbytes, loff_t off)
817d7067512SJosef Bacik {
818d7067512SJosef Bacik 	struct blkcg *blkcg = css_to_blkcg(of_css(of));
819d7067512SJosef Bacik 	struct blkcg_gq *blkg;
820d7067512SJosef Bacik 	struct blkg_conf_ctx ctx;
821d7067512SJosef Bacik 	struct iolatency_grp *iolat;
822d7067512SJosef Bacik 	char *p, *tok;
823d7067512SJosef Bacik 	u64 lat_val = 0;
824d7067512SJosef Bacik 	u64 oldval;
825d7067512SJosef Bacik 	int ret;
826d7067512SJosef Bacik 
827d7067512SJosef Bacik 	ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
828d7067512SJosef Bacik 	if (ret)
829d7067512SJosef Bacik 		return ret;
830d7067512SJosef Bacik 
831d7067512SJosef Bacik 	iolat = blkg_to_lat(ctx.blkg);
832d7067512SJosef Bacik 	p = ctx.body;
833d7067512SJosef Bacik 
834d7067512SJosef Bacik 	ret = -EINVAL;
835d7067512SJosef Bacik 	while ((tok = strsep(&p, " "))) {
836d7067512SJosef Bacik 		char key[16];
837d7067512SJosef Bacik 		char val[21];	/* 18446744073709551615 */
838d7067512SJosef Bacik 
839d7067512SJosef Bacik 		if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
840d7067512SJosef Bacik 			goto out;
841d7067512SJosef Bacik 
842d7067512SJosef Bacik 		if (!strcmp(key, "target")) {
843d7067512SJosef Bacik 			u64 v;
844d7067512SJosef Bacik 
845d7067512SJosef Bacik 			if (!strcmp(val, "max"))
846d7067512SJosef Bacik 				lat_val = 0;
847d7067512SJosef Bacik 			else if (sscanf(val, "%llu", &v) == 1)
848d7067512SJosef Bacik 				lat_val = v * NSEC_PER_USEC;
849d7067512SJosef Bacik 			else
850d7067512SJosef Bacik 				goto out;
851d7067512SJosef Bacik 		} else {
852d7067512SJosef Bacik 			goto out;
853d7067512SJosef Bacik 		}
854d7067512SJosef Bacik 	}
855d7067512SJosef Bacik 
856d7067512SJosef Bacik 	/* Remember the old target so we can reset scaling if it changed. */
857d7067512SJosef Bacik 	blkg = ctx.blkg;
858d7067512SJosef Bacik 	oldval = iolat->min_lat_nsec;
859d7067512SJosef Bacik 
860d7067512SJosef Bacik 	iolatency_set_min_lat_nsec(blkg, lat_val);
861d7067512SJosef Bacik 	if (oldval != iolat->min_lat_nsec) {
862d7067512SJosef Bacik 		iolatency_clear_scaling(blkg);
863d7067512SJosef Bacik 	}
864d7067512SJosef Bacik 
865d7067512SJosef Bacik 	ret = 0;
866d7067512SJosef Bacik out:
867d7067512SJosef Bacik 	blkg_conf_finish(&ctx);
868d7067512SJosef Bacik 	return ret ?: nbytes;
869d7067512SJosef Bacik }
870d7067512SJosef Bacik 
871d7067512SJosef Bacik static u64 iolatency_prfill_limit(struct seq_file *sf,
872d7067512SJosef Bacik 				  struct blkg_policy_data *pd, int off)
873d7067512SJosef Bacik {
874d7067512SJosef Bacik 	struct iolatency_grp *iolat = pd_to_lat(pd);
875d7067512SJosef Bacik 	const char *dname = blkg_dev_name(pd->blkg);
876d7067512SJosef Bacik 
877d7067512SJosef Bacik 	if (!dname || !iolat->min_lat_nsec)
878d7067512SJosef Bacik 		return 0;
879d7067512SJosef Bacik 	seq_printf(sf, "%s target=%llu\n",
88088b7210cSArnd Bergmann 		   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
881d7067512SJosef Bacik 	return 0;
882d7067512SJosef Bacik }
883d7067512SJosef Bacik 
884d7067512SJosef Bacik static int iolatency_print_limit(struct seq_file *sf, void *v)
885d7067512SJosef Bacik {
886d7067512SJosef Bacik 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
887d7067512SJosef Bacik 			  iolatency_prfill_limit,
888d7067512SJosef Bacik 			  &blkcg_policy_iolatency, seq_cft(sf)->private, false);
889d7067512SJosef Bacik 	return 0;
890d7067512SJosef Bacik }
891d7067512SJosef Bacik 
8921fa2840eSJosef Bacik static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
8931fa2840eSJosef Bacik 				 size_t size)
8941fa2840eSJosef Bacik {
8951fa2840eSJosef Bacik 	struct latency_stat stat;
8961fa2840eSJosef Bacik 	int cpu;
8971fa2840eSJosef Bacik 
8981fa2840eSJosef Bacik 	latency_stat_init(iolat, &stat);
8991fa2840eSJosef Bacik 	preempt_disable();
9001fa2840eSJosef Bacik 	for_each_online_cpu(cpu) {
9011fa2840eSJosef Bacik 		struct latency_stat *s;
9021fa2840eSJosef Bacik 		s = per_cpu_ptr(iolat->stats, cpu);
9031fa2840eSJosef Bacik 		latency_stat_sum(iolat, &stat, s);
9041fa2840eSJosef Bacik 	}
9051fa2840eSJosef Bacik 	preempt_enable();
9061fa2840eSJosef Bacik 
9071fa2840eSJosef Bacik 	if (iolat->rq_depth.max_depth == UINT_MAX)
9081fa2840eSJosef Bacik 		return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
9091fa2840eSJosef Bacik 				 (unsigned long long)stat.ps.missed,
9101fa2840eSJosef Bacik 				 (unsigned long long)stat.ps.total);
9111fa2840eSJosef Bacik 	return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
9121fa2840eSJosef Bacik 			 (unsigned long long)stat.ps.missed,
9131fa2840eSJosef Bacik 			 (unsigned long long)stat.ps.total,
9141fa2840eSJosef Bacik 			 iolat->rq_depth.max_depth);
9151fa2840eSJosef Bacik }
9161fa2840eSJosef Bacik 
917d7067512SJosef Bacik static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
918d7067512SJosef Bacik 				size_t size)
919d7067512SJosef Bacik {
920d7067512SJosef Bacik 	struct iolatency_grp *iolat = pd_to_lat(pd);
9211fa2840eSJosef Bacik 	unsigned long long avg_lat;
9221fa2840eSJosef Bacik 	unsigned long long cur_win;
923d7067512SJosef Bacik 
9241fa2840eSJosef Bacik 	if (iolat->ssd)
9251fa2840eSJosef Bacik 		return iolatency_ssd_stat(iolat, buf, size);
9261fa2840eSJosef Bacik 
9271fa2840eSJosef Bacik 	avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
9281fa2840eSJosef Bacik 	cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
929a284390bSJosef Bacik 	if (iolat->rq_depth.max_depth == UINT_MAX)
930c480bcf9SDennis Zhou (Facebook) 		return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
931c480bcf9SDennis Zhou (Facebook) 				 avg_lat, cur_win);
932d7067512SJosef Bacik 
933c480bcf9SDennis Zhou (Facebook) 	return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
934c480bcf9SDennis Zhou (Facebook) 			 iolat->rq_depth.max_depth, avg_lat, cur_win);
935d7067512SJosef Bacik }
936d7067512SJosef Bacik 
937d7067512SJosef Bacik 
938d7067512SJosef Bacik static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
939d7067512SJosef Bacik {
940d7067512SJosef Bacik 	struct iolatency_grp *iolat;
941d7067512SJosef Bacik 
942d7067512SJosef Bacik 	iolat = kzalloc_node(sizeof(*iolat), gfp, node);
943d7067512SJosef Bacik 	if (!iolat)
944d7067512SJosef Bacik 		return NULL;
9451fa2840eSJosef Bacik 	iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
9461fa2840eSJosef Bacik 				       __alignof__(struct latency_stat), gfp);
947d7067512SJosef Bacik 	if (!iolat->stats) {
948d7067512SJosef Bacik 		kfree(iolat);
949d7067512SJosef Bacik 		return NULL;
950d7067512SJosef Bacik 	}
951d7067512SJosef Bacik 	return &iolat->pd;
952d7067512SJosef Bacik }
953d7067512SJosef Bacik 
954d7067512SJosef Bacik static void iolatency_pd_init(struct blkg_policy_data *pd)
955d7067512SJosef Bacik {
956d7067512SJosef Bacik 	struct iolatency_grp *iolat = pd_to_lat(pd);
957d7067512SJosef Bacik 	struct blkcg_gq *blkg = lat_to_blkg(iolat);
958d7067512SJosef Bacik 	struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
959d7067512SJosef Bacik 	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
960d7067512SJosef Bacik 	u64 now = ktime_to_ns(ktime_get());
961d7067512SJosef Bacik 	int cpu;
962d7067512SJosef Bacik 
9631fa2840eSJosef Bacik 	iolat->ssd = blk_queue_nonrot(blkg->q);
9671fa2840eSJosef Bacik 
968d7067512SJosef Bacik 	for_each_possible_cpu(cpu) {
9691fa2840eSJosef Bacik 		struct latency_stat *stat;
970d7067512SJosef Bacik 		stat = per_cpu_ptr(iolat->stats, cpu);
9711fa2840eSJosef Bacik 		latency_stat_init(iolat, stat);
972d7067512SJosef Bacik 	}
973d7067512SJosef Bacik 
974451bb7c3SJosef Bacik 	latency_stat_init(iolat, &iolat->cur_stat);
975d7067512SJosef Bacik 	rq_wait_init(&iolat->rq_wait);
976d7067512SJosef Bacik 	spin_lock_init(&iolat->child_lat.lock);
977ff4cee08SJosef Bacik 	iolat->rq_depth.queue_depth = blkg->q->nr_requests;
978a284390bSJosef Bacik 	iolat->rq_depth.max_depth = UINT_MAX;
979d7067512SJosef Bacik 	iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
980d7067512SJosef Bacik 	iolat->blkiolat = blkiolat;
981d7067512SJosef Bacik 	iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
982d7067512SJosef Bacik 	atomic64_set(&iolat->window_start, now);
983d7067512SJosef Bacik 
984d7067512SJosef Bacik 	/*
985d7067512SJosef Bacik 	 * We init things in list order, so the pd for the parent may not be
986d7067512SJosef Bacik 	 * init'ed yet for whatever reason.
987d7067512SJosef Bacik 	 */
988d7067512SJosef Bacik 	if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
989d7067512SJosef Bacik 		struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
990d7067512SJosef Bacik 		atomic_set(&iolat->scale_cookie,
991d7067512SJosef Bacik 			   atomic_read(&parent->child_lat.scale_cookie));
992d7067512SJosef Bacik 	} else {
993d7067512SJosef Bacik 		atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
994d7067512SJosef Bacik 	}
995d7067512SJosef Bacik 
996d7067512SJosef Bacik 	atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
997d7067512SJosef Bacik }
998d7067512SJosef Bacik 
999d7067512SJosef Bacik static void iolatency_pd_offline(struct blkg_policy_data *pd)
1000d7067512SJosef Bacik {
1001d7067512SJosef Bacik 	struct iolatency_grp *iolat = pd_to_lat(pd);
1002d7067512SJosef Bacik 	struct blkcg_gq *blkg = lat_to_blkg(iolat);
1003d7067512SJosef Bacik 
1004d7067512SJosef Bacik 	iolatency_set_min_lat_nsec(blkg, 0);
1005d7067512SJosef Bacik 	iolatency_clear_scaling(blkg);
1006d7067512SJosef Bacik }
1007d7067512SJosef Bacik 
1008d7067512SJosef Bacik static void iolatency_pd_free(struct blkg_policy_data *pd)
1009d7067512SJosef Bacik {
1010d7067512SJosef Bacik 	struct iolatency_grp *iolat = pd_to_lat(pd);
1011d7067512SJosef Bacik 	free_percpu(iolat->stats);
1012d7067512SJosef Bacik 	kfree(iolat);
1013d7067512SJosef Bacik }
1014d7067512SJosef Bacik 
1015d7067512SJosef Bacik static struct cftype iolatency_files[] = {
1016d7067512SJosef Bacik 	{
1017d7067512SJosef Bacik 		.name = "latency",
1018d7067512SJosef Bacik 		.flags = CFTYPE_NOT_ON_ROOT,
1019d7067512SJosef Bacik 		.seq_show = iolatency_print_limit,
1020d7067512SJosef Bacik 		.write = iolatency_set_limit,
1021d7067512SJosef Bacik 	},
1022d7067512SJosef Bacik 	{}
1023d7067512SJosef Bacik };
1024d7067512SJosef Bacik 
1025d7067512SJosef Bacik static struct blkcg_policy blkcg_policy_iolatency = {
1026d7067512SJosef Bacik 	.dfl_cftypes	= iolatency_files,
1027d7067512SJosef Bacik 	.pd_alloc_fn	= iolatency_pd_alloc,
1028d7067512SJosef Bacik 	.pd_init_fn	= iolatency_pd_init,
1029d7067512SJosef Bacik 	.pd_offline_fn	= iolatency_pd_offline,
1030d7067512SJosef Bacik 	.pd_free_fn	= iolatency_pd_free,
1031d7067512SJosef Bacik 	.pd_stat_fn	= iolatency_pd_stat,
1032d7067512SJosef Bacik };
1033d7067512SJosef Bacik 
1034d7067512SJosef Bacik static int __init iolatency_init(void)
1035d7067512SJosef Bacik {
1036d7067512SJosef Bacik 	return blkcg_policy_register(&blkcg_policy_iolatency);
1037d7067512SJosef Bacik }
1038d7067512SJosef Bacik 
1039d7067512SJosef Bacik static void __exit iolatency_exit(void)
1040d7067512SJosef Bacik {
1041d7067512SJosef Bacik 	blkcg_policy_unregister(&blkcg_policy_iolatency);
1042d7067512SJosef Bacik }
1043d7067512SJosef Bacik 
1044d7067512SJosef Bacik module_init(iolatency_init);
1045d7067512SJosef Bacik module_exit(iolatency_exit);
1046