// SPDX-License-Identifier: GPL-2.0
/*
 * Block rq-qos base io controller
 *
 * This works similarly to wbt, with a few exceptions:
 *
 * - It's bio based, so the latency covers the whole block layer in addition to
 *   the actual io.
 * - We will throttle all IO that comes in here if we need to.
 * - We use the mean latency over the 100ms window.  This is because writes can
 *   be particularly fast, which could give us a false sense of the impact of
 *   other workloads on our protected workload.
 * - By default there's no throttling, we set the queue_depth to UINT_MAX so
 *   that we can have as many outstanding bio's as we're allowed to.  Only at
 *   throttle time do we pay attention to the actual queue depth.
 *
 * The hierarchy works like the cpu controller does, we track the latency at
 * every configured node, and each configured node has its own independent
 * queue depth.  This means that we only care about our latency targets at the
 * peer level.  Some group at the bottom of the hierarchy isn't going to affect
 * a group at the end of some other path if we're only configured at leaf level.
 *
 * Consider the following
 *
 *                   root blkg
 *             /                     \
 *        fast (target=5ms)     slow (target=10ms)
 *         /     \                  /        \
 *       a        b          normal(15ms)   unloved
 *
 * "a" and "b" have no target, but their combined io under "fast" cannot exceed
 * an average latency of 5ms.  If it does then we will throttle the "slow"
 * group.  In the case of "normal", if it exceeds its 15ms target, we will
 * throttle "unloved", but nobody else.
 *
 * In this example "fast", "slow", and "normal" will be the only groups actually
 * accounting their io latencies.  We have to walk up the hierarchy to the root
 * on every submit and complete so we can do the appropriate stat recording and
 * adjust the queue depth of ourselves if needed.
 *
 * There are 2 ways we throttle IO.
 *
 * 1) Queue depth throttling.  As we throttle down we will adjust the maximum
 * number of IO's we're allowed to have in flight.  This starts at UINT_MAX and
 * goes down to 1.  If the group is only ever submitting IO for itself then this
 * is the only way we throttle.
 *
 * 2) Induced delay throttling.  This is for the case that a group is generating
 * IO that has to be issued by the root cg to avoid priority inversion.  Think
 * REQ_META or REQ_SWAP.  If we are already at qd == 1 and we're getting a lot
 * of work done for us on behalf of the root cg and are being asked to scale
 * down further, then we induce a latency at userspace return.  We accumulate
 * the total amount of time we need to be punished by doing
 *
 * total_time += min_lat_nsec - actual_io_completion
 *
 * and then at throttle time will do
 *
 * throttle_time = min(total_time, NSEC_PER_SEC)
 *
 * This induced delay will throttle back the activity that is generating the
 * root cg issued io's, whether that's some metadata intensive operation or the
 * group is using so much memory that it is pushing us into swap.
 *
 * Copyright (C) 2018 Josef Bacik
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/memcontrol.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include <linux/blk-mq.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
#include "blk.h"

#define DEFAULT_SCALE_COOKIE 1000000U

static struct blkcg_policy blkcg_policy_iolatency;
struct iolatency_grp;

struct blk_iolatency {
	struct rq_qos rqos;
	struct timer_list timer;
	atomic_t enabled;
};

static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
{
	return container_of(rqos, struct blk_iolatency, rqos);
}

static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
{
	return atomic_read(&blkiolat->enabled) > 0;
}

struct child_latency_info {
	spinlock_t lock;

	/* Last time we adjusted the scale of everybody. */
	u64 last_scale_event;

	/* The latency that we missed. */
	u64 scale_lat;

	/* Total io's from all of our children for the last summation. */
	u64 nr_samples;

	/* The guy who actually changed the latency numbers. */
	struct iolatency_grp *scale_grp;

	/* Cookie to tell if we need to scale up or down. */
	atomic_t scale_cookie;
};

struct percentile_stats {
	u64 total;
	u64 missed;
};

struct latency_stat {
	union {
		struct percentile_stats ps;
		struct blk_rq_stat rqs;
	};
};

struct iolatency_grp {
	struct blkg_policy_data pd;
	struct latency_stat __percpu *stats;
	struct latency_stat cur_stat;
	struct blk_iolatency *blkiolat;
	struct rq_depth rq_depth;
	struct rq_wait rq_wait;
	atomic64_t window_start;
	atomic_t scale_cookie;
	u64 min_lat_nsec;
	u64 cur_win_nsec;

	/* total running average of our io latency. */
	u64 lat_avg;

	/* Our current number of IO's for the last summation. */
	u64 nr_samples;

	bool ssd;
	struct child_latency_info child_lat;
};

#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
/*
 * These are the constants used to fake the fixed-point moving average
 * calculation just like load average.  The call to calc_load() folds
 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg.  The sampling
 * window size is bucketed to try to approximately calculate average
 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
 * elapse immediately.  Note, windows only elapse with IO activity.  Idle
 * periods extend the most recent window.
 */
#define BLKIOLATENCY_NR_EXP_FACTORS 5
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
				      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
	2045, // exp(1/600) - 600 samples
	2039, // exp(1/240) - 240 samples
	2031, // exp(1/120) - 120 samples
	2023, // exp(1/80)  - 80 samples
	2014, // exp(1/60)  - 60 samples
};

static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}

static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
{
	return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
}

static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
	return pd_to_blkg(&iolat->pd);
}

static inline void latency_stat_init(struct iolatency_grp *iolat,
				     struct latency_stat *stat)
{
	if (iolat->ssd) {
		stat->ps.total = 0;
		stat->ps.missed = 0;
	} else
		blk_rq_stat_init(&stat->rqs);
}

static inline void latency_stat_sum(struct iolatency_grp *iolat,
				    struct latency_stat *sum,
				    struct latency_stat *stat)
{
	if (iolat->ssd) {
		sum->ps.total += stat->ps.total;
		sum->ps.missed += stat->ps.missed;
	} else
		blk_rq_stat_sum(&sum->rqs, &stat->rqs);
}

static inline void latency_stat_record_time(struct iolatency_grp *iolat,
					    u64 req_time)
{
	struct latency_stat *stat = get_cpu_ptr(iolat->stats);
	if (iolat->ssd) {
		if (req_time >= iolat->min_lat_nsec)
			stat->ps.missed++;
		stat->ps.total++;
	} else
		blk_rq_stat_add(&stat->rqs, req_time);
	put_cpu_ptr(stat);
}

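/*
 * SSD: the window is ok if fewer than 10% of the IO's (with a minimum
 * threshold of one IO) missed the latency target.  Rotational: the
 * window is ok if the mean latency is within the target.
 */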
static inline bool latency_sum_ok(struct iolatency_grp *iolat,
				  struct latency_stat *stat)
{
	if (iolat->ssd) {
		u64 thresh = div64_u64(stat->ps.total, 10);
		thresh = max(thresh, 1ULL);
		return stat->ps.missed < thresh;
	}
	return stat->rqs.mean <= iolat->min_lat_nsec;
}

static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
				       struct latency_stat *stat)
{
	if (iolat->ssd)
		return stat->ps.total;
	return stat->rqs.nr_samples;
}

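/*
 * Fold this window's mean latency into the decaying load-average style
 * lat_avg.  This is only meaningful on rotational storage, where we
 * track the mean; on SSDs the percentile stats are used directly.
 */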
static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
					      struct latency_stat *stat)
{
	int exp_idx;

	if (iolat->ssd)
		return;

	/*
	 * calc_load() takes in a number stored in fixed point representation.
	 * Because we are using this for IO time in ns, the values stored
	 * are significantly larger than the FIXED_1 denominator (2048).
	 * Therefore, rounding errors in the calculation are negligible and
	 * can be ignored.
	 */
	exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
			div64_u64(iolat->cur_win_nsec,
				  BLKIOLATENCY_EXP_BUCKET_SIZE));
	iolat->lat_avg = calc_load(iolat->lat_avg,
				   iolatency_exp_factors[exp_idx],
				   stat->rqs.mean);
}

static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
	atomic_dec(&rqw->inflight);
	wake_up(&rqw->wait);
}

static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
{
	struct iolatency_grp *iolat = private_data;
	return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
}

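/*
 * Take a slot of the group's allowed queue depth, sleeping on rq_wait if
 * we are over the current max_depth.  If an induced delay has been set
 * up on the blkg we also schedule the userspace-return throttle.
 */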
static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
				       struct iolatency_grp *iolat,
				       bool issue_as_root,
				       bool use_memdelay)
{
	struct rq_wait *rqw = &iolat->rq_wait;
	unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);

	if (use_delay)
		blkcg_schedule_throttle(rqos->q, use_memdelay);

	/*
	 * To avoid priority inversions we want to just take a slot if we are
	 * issuing as root.  If we're being killed off there's no point in
	 * delaying things, we may have been killed by OOM so throttling may
	 * make recovery take even longer, so just let the IO's through so the
	 * task can go away.
	 */
	if (issue_as_root || fatal_signal_pending(current)) {
		atomic_inc(&rqw->inflight);
		return;
	}

	rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
}

#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4

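/*
 * When scaling up we step by 1/16th of the queue depth, when scaling
 * down by 1/4th, and always by at least 1.  Scaling down faster than we
 * scale back up lets us react quickly to latency pressure.
 */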
static inline unsigned long scale_amount(unsigned long qd, bool up)
{
	return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
}

/*
 * We scale the qd down faster than we scale up, so we need to use this helper
 * to adjust the scale_cookie accordingly so we don't prematurely get
 * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
 *
 * Each group has its own local copy of the last scale cookie it saw, so if
 * the global scale cookie goes up or down it knows which way it needs to go
 * based on its last knowledge of it.
 */
static void scale_cookie_change(struct blk_iolatency *blkiolat,
				struct child_latency_info *lat_info,
				bool up)
{
	unsigned long qd = blkiolat->rqos.q->nr_requests;
	unsigned long scale = scale_amount(qd, up);
	unsigned long old = atomic_read(&lat_info->scale_cookie);
	unsigned long max_scale = qd << 1;
	unsigned long diff = 0;

	if (old < DEFAULT_SCALE_COOKIE)
		diff = DEFAULT_SCALE_COOKIE - old;

	if (up) {
		if (scale + old > DEFAULT_SCALE_COOKIE)
			atomic_set(&lat_info->scale_cookie,
				   DEFAULT_SCALE_COOKIE);
		else if (diff > qd)
			atomic_inc(&lat_info->scale_cookie);
		else
			atomic_add(scale, &lat_info->scale_cookie);
	} else {
		/*
		 * We don't want to dig a hole so deep that it takes us hours to
		 * dig out of it.  Just enough that we don't throttle/unthrottle
		 * with jagged workloads but can still unthrottle once pressure
		 * has sufficiently dissipated.
		 */
		if (diff > qd) {
			if (diff < max_scale)
				atomic_dec(&lat_info->scale_cookie);
		} else {
			atomic_sub(scale, &lat_info->scale_cookie);
		}
	}
}

/*
 * Change the queue depth of the iolatency_grp.  We add 1/16th of the
 * queue depth at a time when scaling up, and halve the depth when
 * scaling down, so we don't get wild swings and hopefully dial in to a
 * fairer distribution of the overall queue depth.
 */
static void scale_change(struct iolatency_grp *iolat, bool up)
{
	unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
	unsigned long scale = scale_amount(qd, up);
	unsigned long old = iolat->rq_depth.max_depth;

	if (old > qd)
		old = qd;

	if (up) {
		if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
			return;

		if (old < qd) {
			old += scale;
			old = min(old, qd);
			iolat->rq_depth.max_depth = old;
			wake_up_all(&iolat->rq_wait.wait);
		}
	} else {
		old >>= 1;
		iolat->rq_depth.max_depth = max(old, 1UL);
	}
}

/* Check our parent and see if the scale cookie has changed. */
static void check_scale_change(struct iolatency_grp *iolat)
{
	struct iolatency_grp *parent;
	struct child_latency_info *lat_info;
	unsigned int cur_cookie;
	unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
	u64 scale_lat;
	unsigned int old;
	int direction = 0;

	if (lat_to_blkg(iolat)->parent == NULL)
		return;

	parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
	if (!parent)
		return;

	lat_info = &parent->child_lat;
	cur_cookie = atomic_read(&lat_info->scale_cookie);
	scale_lat = READ_ONCE(lat_info->scale_lat);

	if (cur_cookie < our_cookie)
		direction = -1;
	else if (cur_cookie > our_cookie)
		direction = 1;
	else
		return;

	old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);

	/* Somebody beat us to the punch, just bail. */
	if (old != our_cookie)
		return;

	if (direction < 0 && iolat->min_lat_nsec) {
		u64 samples_thresh;

		if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
			return;

		/*
		 * Sometimes high priority groups are their own worst enemy, so
		 * instead of taking it out on some poor other group that did 5%
		 * or less of the IO's for the last summation just skip this
		 * scale down event.
		 */
		samples_thresh = lat_info->nr_samples * 5;
		samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
		if (iolat->nr_samples <= samples_thresh)
			return;
	}

	/* We're as low as we can go. */
	if (iolat->rq_depth.max_depth == 1 && direction < 0) {
		blkcg_use_delay(lat_to_blkg(iolat));
		return;
	}

	/* We're back to the default cookie, unthrottle all the things. */
	if (cur_cookie == DEFAULT_SCALE_COOKIE) {
		blkcg_clear_delay(lat_to_blkg(iolat));
		iolat->rq_depth.max_depth = UINT_MAX;
		wake_up_all(&iolat->rq_wait.wait);
		return;
	}

	scale_change(iolat, direction > 0);
}

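/*
 * rq_qos throttle hook.  Walk from the bio's blkg up towards the root,
 * applying any pending scale change and taking a queue depth slot at
 * every level that has this policy configured.
 */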
static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
	struct blkcg_gq *blkg = bio->bi_blkg;
	bool issue_as_root = bio_issue_as_root_blkg(bio);

	if (!blk_iolatency_enabled(blkiolat))
		return;

	while (blkg && blkg->parent) {
		struct iolatency_grp *iolat = blkg_to_lat(blkg);
		if (!iolat) {
			blkg = blkg->parent;
			continue;
		}

		check_scale_change(iolat);
		__blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
				     (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
		blkg = blkg->parent;
	}
	if (!timer_pending(&blkiolat->timer))
		mod_timer(&blkiolat->timer, jiffies + HZ);
}

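/*
 * Account the latency of a completed bio.  Root-issued bio's aren't
 * counted in the group's stats; instead, if they complete faster than
 * the group's target while we're throttled down, the unused budget is
 * added to the group's induced delay.
 */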
static void iolatency_record_time(struct iolatency_grp *iolat,
				  struct bio_issue *issue, u64 now,
				  bool issue_as_root)
{
	u64 start = bio_issue_time(issue);
	u64 req_time;

	/*
	 * Have to do this so we are truncated to the same granularity as the
	 * issue time, since bio_issue only stores a truncated timestamp.
	 */
	now = __bio_issue_time(now);

	if (now <= start)
		return;

	req_time = now - start;

	/*
	 * We don't want to count issue_as_root bio's in the cgroups latency
	 * statistics as it could skew the numbers downwards.
	 */
	if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
		u64 sub = iolat->min_lat_nsec;
		if (req_time < sub)
			blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
		return;
	}

	latency_stat_record_time(iolat, req_time);
}

#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5

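/*
 * Called when this group's sampling window elapses.  Sum and reset the
 * per-cpu stats, fold them into the parent's accounting, and if the
 * window was good (or bad) enough, nudge the parent's scale cookie so
 * that all of the parent's children adjust their queue depths.
 */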
static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct iolatency_grp *parent;
	struct child_latency_info *lat_info;
	struct latency_stat stat;
	unsigned long flags;
	int cpu;

	latency_stat_init(iolat, &stat);
	preempt_disable();
	for_each_online_cpu(cpu) {
		struct latency_stat *s;
		s = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_sum(iolat, &stat, s);
		latency_stat_init(iolat, s);
	}
	preempt_enable();

	parent = blkg_to_lat(blkg->parent);
	if (!parent)
		return;

	lat_info = &parent->child_lat;

	iolat_update_total_lat_avg(iolat, &stat);

	/* Everything is ok and we don't need to adjust the scale. */
	if (latency_sum_ok(iolat, &stat) &&
	    atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
		return;

	/* Somebody beat us to the punch, just bail. */
	spin_lock_irqsave(&lat_info->lock, flags);

	latency_stat_sum(iolat, &iolat->cur_stat, &stat);
	lat_info->nr_samples -= iolat->nr_samples;
	lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
	iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);

	if ((lat_info->last_scale_event >= now ||
	    now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
		goto out;

	if (latency_sum_ok(iolat, &iolat->cur_stat) &&
	    latency_sum_ok(iolat, &stat)) {
		if (latency_stat_samples(iolat, &iolat->cur_stat) <
		    BLKIOLATENCY_MIN_GOOD_SAMPLES)
			goto out;
		if (lat_info->scale_grp == iolat) {
			lat_info->last_scale_event = now;
			scale_cookie_change(iolat->blkiolat, lat_info, true);
		}
	} else if (lat_info->scale_lat == 0 ||
		   lat_info->scale_lat >= iolat->min_lat_nsec) {
		lat_info->last_scale_event = now;
		if (!lat_info->scale_grp ||
		    lat_info->scale_lat > iolat->min_lat_nsec) {
			WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
			lat_info->scale_grp = iolat;
		}
		scale_cookie_change(iolat->blkiolat, lat_info, false);
	}
	latency_stat_init(iolat, &iolat->cur_stat);
out:
	spin_unlock_irqrestore(&lat_info->lock, flags);
}

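/*
 * rq_qos done_bio hook.  Walk back up the hierarchy releasing the queue
 * depth slots we took at submission, recording the completion latency
 * and kicking off a window check if our sampling window has elapsed.
 */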
static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	struct blkcg_gq *blkg;
	struct rq_wait *rqw;
	struct iolatency_grp *iolat;
	u64 window_start;
	u64 now = ktime_to_ns(ktime_get());
	bool issue_as_root = bio_issue_as_root_blkg(bio);
	bool enabled = false;
	int inflight = 0;

	blkg = bio->bi_blkg;
	if (!blkg || !bio_flagged(bio, BIO_TRACKED))
		return;

	iolat = blkg_to_lat(bio->bi_blkg);
	if (!iolat)
		return;

	enabled = blk_iolatency_enabled(iolat->blkiolat);
	if (!enabled)
		return;

	while (blkg && blkg->parent) {
		iolat = blkg_to_lat(blkg);
		if (!iolat) {
			blkg = blkg->parent;
			continue;
		}
		rqw = &iolat->rq_wait;

		inflight = atomic_dec_return(&rqw->inflight);
		WARN_ON_ONCE(inflight < 0);
		/*
		 * If bi_status is BLK_STS_AGAIN, the bio wasn't actually
		 * submitted, so do not account for it.
		 */
		if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
			iolatency_record_time(iolat, &bio->bi_issue, now,
					      issue_as_root);
			window_start = atomic64_read(&iolat->window_start);
			if (now > window_start &&
			    (now - window_start) >= iolat->cur_win_nsec) {
				if (atomic64_cmpxchg(&iolat->window_start,
					     window_start, now) == window_start)
					iolatency_check_latencies(iolat, now);
			}
		}
		wake_up(&rqw->wait);
		blkg = blkg->parent;
	}
}

static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);

	del_timer_sync(&blkiolat->timer);
	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
	kfree(blkiolat);
}

static struct rq_qos_ops blkcg_iolatency_ops = {
	.throttle = blkcg_iolatency_throttle,
	.done_bio = blkcg_iolatency_done_bio,
	.exit = blkcg_iolatency_exit,
};

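/*
 * Timer armed from the throttle path.  Its job is to make sure a
 * scaled-down subtree can recover even when the group that caused the
 * scale down has gone idle: scale back up if there's no scale_grp, and
 * clear a scale_grp that hasn't generated a scale event for 5 seconds.
 */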
static void blkiolatency_timer_fn(struct timer_list *t)
{
	struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	u64 now = ktime_to_ns(ktime_get());

	rcu_read_lock();
	blkg_for_each_descendant_pre(blkg, pos_css,
				     blkiolat->rqos.q->root_blkg) {
		struct iolatency_grp *iolat;
		struct child_latency_info *lat_info;
		unsigned long flags;
		u64 cookie;

		/*
		 * We could be exiting, don't access the pd unless we have a
		 * ref on the blkg.
		 */
		if (!blkg_tryget(blkg))
			continue;

		iolat = blkg_to_lat(blkg);
		if (!iolat)
			goto next;

		lat_info = &iolat->child_lat;
		cookie = atomic_read(&lat_info->scale_cookie);

		if (cookie >= DEFAULT_SCALE_COOKIE)
			goto next;

		spin_lock_irqsave(&lat_info->lock, flags);
		if (lat_info->last_scale_event >= now)
			goto next_lock;

		/*
		 * We scaled down but don't have a scale_grp, scale up and carry
		 * on.
		 */
		if (lat_info->scale_grp == NULL) {
			scale_cookie_change(iolat->blkiolat, lat_info, true);
			goto next_lock;
		}

		/*
		 * It's been 5 seconds since our last scale event, clear the
		 * scale grp in case the group that needed the scale down isn't
		 * doing any IO currently.
		 */
		if (now - lat_info->last_scale_event >=
		    ((u64)NSEC_PER_SEC * 5))
			lat_info->scale_grp = NULL;
next_lock:
		spin_unlock_irqrestore(&lat_info->lock, flags);
next:
		blkg_put(blkg);
	}
	rcu_read_unlock();
}

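/*
 * Set up io latency control for a request_queue: allocate the
 * blk_iolatency structure, register its rq_qos ops, activate the policy
 * and initialize the unthrottling timer.
 */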
int blk_iolatency_init(struct request_queue *q)
{
	struct blk_iolatency *blkiolat;
	struct rq_qos *rqos;
	int ret;

	blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
	if (!blkiolat)
		return -ENOMEM;

	rqos = &blkiolat->rqos;
	rqos->id = RQ_QOS_CGROUP;
	rqos->ops = &blkcg_iolatency_ops;
	rqos->q = q;

	rq_qos_add(q, rqos);

	ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
	if (ret) {
		rq_qos_del(q, rqos);
		kfree(blkiolat);
		return ret;
	}

	timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);

	return 0;
}

/*
 * Return 1 if this call enabled iolatency on the blkg, -1 if it disabled
 * it, and 0 if the enabled state didn't change.
 */
static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
	struct iolatency_grp *iolat = blkg_to_lat(blkg);
	u64 oldval = iolat->min_lat_nsec;

	iolat->min_lat_nsec = val;
	iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
	iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
				    BLKIOLATENCY_MAX_WIN_SIZE);

	if (!oldval && val)
		return 1;
	if (oldval && !val) {
		blkcg_clear_delay(blkg);
		return -1;
	}
	return 0;
}

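/*
 * Reset the parent's scaling state (cookie, scale_grp and scale_lat) so
 * the subtree starts from a clean slate after a target change.
 */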
static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
	if (blkg->parent) {
		struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
		struct child_latency_info *lat_info;
		if (!iolat)
			return;

		lat_info = &iolat->child_lat;
		spin_lock(&lat_info->lock);
		atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
		lat_info->last_scale_event = 0;
		lat_info->scale_grp = NULL;
		lat_info->scale_lat = 0;
		spin_unlock(&lat_info->lock);
	}
}

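/*
 * Parse writes to the io.latency file of the form "MAJ:MIN target=<usecs>"
 * (or "target=max" to disable) and apply the new latency target, toggling
 * the global enable count under a frozen queue when needed.
 */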
static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
			     size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkcg_gq *blkg;
	struct blkg_conf_ctx ctx;
	struct iolatency_grp *iolat;
	char *p, *tok;
	u64 lat_val = 0;
	u64 oldval;
	int ret;
	int enable = 0;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
	if (ret)
		return ret;

	iolat = blkg_to_lat(ctx.blkg);
	p = ctx.body;

	ret = -EINVAL;
	while ((tok = strsep(&p, " "))) {
		char key[16];
		char val[21];	/* 18446744073709551616 */

		if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
			goto out;

		if (!strcmp(key, "target")) {
			u64 v;

			if (!strcmp(val, "max"))
				lat_val = 0;
			else if (sscanf(val, "%llu", &v) == 1)
				lat_val = v * NSEC_PER_USEC;
			else
				goto out;
		} else {
			goto out;
		}
	}

	/* Walk up the tree to see if our new val is lower than it should be. */
	blkg = ctx.blkg;
	oldval = iolat->min_lat_nsec;

	enable = iolatency_set_min_lat_nsec(blkg, lat_val);
	if (enable) {
		WARN_ON_ONCE(!blk_get_queue(blkg->q));
		blkg_get(blkg);
	}

	if (oldval != iolat->min_lat_nsec) {
		iolatency_clear_scaling(blkg);
	}

	ret = 0;
out:
	blkg_conf_finish(&ctx);
	if (ret == 0 && enable) {
		struct iolatency_grp *tmp = blkg_to_lat(blkg);
		struct blk_iolatency *blkiolat = tmp->blkiolat;

		blk_mq_freeze_queue(blkg->q);

		if (enable == 1)
			atomic_inc(&blkiolat->enabled);
		else if (enable == -1)
			atomic_dec(&blkiolat->enabled);
		else
			WARN_ON_ONCE(1);

		blk_mq_unfreeze_queue(blkg->q);

		blkg_put(blkg);
		blk_put_queue(blkg->q);
	}
	return ret ?: nbytes;
}

static u64 iolatency_prfill_limit(struct seq_file *sf,
				  struct blkg_policy_data *pd, int off)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname || !iolat->min_lat_nsec)
		return 0;
	seq_printf(sf, "%s target=%llu\n",
		   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
	return 0;
}

static int iolatency_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  iolatency_prfill_limit,
			  &blkcg_policy_iolatency, seq_cft(sf)->private, false);
	return 0;
}

static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
				 size_t size)
{
	struct latency_stat stat;
	int cpu;

	latency_stat_init(iolat, &stat);
	preempt_disable();
	for_each_online_cpu(cpu) {
		struct latency_stat *s;
		s = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_sum(iolat, &stat, s);
	}
	preempt_enable();

	if (iolat->rq_depth.max_depth == UINT_MAX)
		return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
				 (unsigned long long)stat.ps.missed,
				 (unsigned long long)stat.ps.total);
	return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
			 (unsigned long long)stat.ps.missed,
			 (unsigned long long)stat.ps.total,
			 iolat->rq_depth.max_depth);
}

static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
				size_t size)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	unsigned long long avg_lat;
	unsigned long long cur_win;

	if (!blkcg_debug_stats)
		return 0;

	if (iolat->ssd)
		return iolatency_ssd_stat(iolat, buf, size);

	avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
	cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
	if (iolat->rq_depth.max_depth == UINT_MAX)
		return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
				 avg_lat, cur_win);

	return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
			 iolat->rq_depth.max_depth, avg_lat, cur_win);
}

static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp,
						   struct request_queue *q,
						   struct blkcg *blkcg)
{
	struct iolatency_grp *iolat;

	iolat = kzalloc_node(sizeof(*iolat), gfp, q->node);
	if (!iolat)
		return NULL;
	iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
				       __alignof__(struct latency_stat), gfp);
	if (!iolat->stats) {
		kfree(iolat);
		return NULL;
	}
	return &iolat->pd;
}

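/*
 * Initialize a group's state when it comes online: pick ssd vs
 * rotational accounting, reset the per-cpu stats, start with an
 * unlimited queue depth and inherit the parent's scale cookie.
 */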
static void iolatency_pd_init(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
	u64 now = ktime_to_ns(ktime_get());
	int cpu;

	if (blk_queue_nonrot(blkg->q))
		iolat->ssd = true;
	else
		iolat->ssd = false;

	for_each_possible_cpu(cpu) {
		struct latency_stat *stat;
		stat = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_init(iolat, stat);
	}

	latency_stat_init(iolat, &iolat->cur_stat);
	rq_wait_init(&iolat->rq_wait);
	spin_lock_init(&iolat->child_lat.lock);
	iolat->rq_depth.queue_depth = blkg->q->nr_requests;
	iolat->rq_depth.max_depth = UINT_MAX;
	iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
	iolat->blkiolat = blkiolat;
	iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
	atomic64_set(&iolat->window_start, now);

	/*
	 * We init things in list order, so the pd for the parent may not be
	 * init'ed yet for whatever reason.
	 */
	if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
		struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
		atomic_set(&iolat->scale_cookie,
			   atomic_read(&parent->child_lat.scale_cookie));
	} else {
		atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
	}

	atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
}

static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct blk_iolatency *blkiolat = iolat->blkiolat;
	int ret;

	ret = iolatency_set_min_lat_nsec(blkg, 0);
	if (ret == 1)
		atomic_inc(&blkiolat->enabled);
	if (ret == -1)
		atomic_dec(&blkiolat->enabled);
	iolatency_clear_scaling(blkg);
}

static void iolatency_pd_free(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	free_percpu(iolat->stats);
	kfree(iolat);
}

static struct cftype iolatency_files[] = {
	{
		.name = "latency",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = iolatency_print_limit,
		.write = iolatency_set_limit,
	},
	{}
};

static struct blkcg_policy blkcg_policy_iolatency = {
	.dfl_cftypes	= iolatency_files,
	.pd_alloc_fn	= iolatency_pd_alloc,
	.pd_init_fn	= iolatency_pd_init,
	.pd_offline_fn	= iolatency_pd_offline,
	.pd_free_fn	= iolatency_pd_free,
	.pd_stat_fn	= iolatency_pd_stat,
};

static int __init iolatency_init(void)
{
	return blkcg_policy_register(&blkcg_policy_iolatency);
}

static void __exit iolatency_exit(void)
{
	return blkcg_policy_unregister(&blkcg_policy_iolatency);
}

module_init(iolatency_init);
module_exit(iolatency_exit);