// SPDX-License-Identifier: GPL-2.0
/*
 * Block rq-qos base io controller
 *
 * This works similarly to wbt with a few exceptions:
 *
 * - It's bio based, so the latency covers the whole block layer in addition to
 *   the actual io.
 * - We will throttle all IO that comes in here if we need to.
 * - We use the mean latency over the 100ms window.  This is because writes can
 *   be particularly fast, which could give us a false sense of the impact of
 *   other workloads on our protected workload.
 * - By default there's no throttling, we set the queue_depth to UINT_MAX so
 *   that we can have as many outstanding bio's as we're allowed to.  Only at
 *   throttle time do we pay attention to the actual queue depth.
 *
 * The hierarchy works like the cpu controller does: we track the latency at
 * every configured node, and each configured node has its own independent
 * queue depth.  This means that we only care about our latency targets at the
 * peer level.  Some group at the bottom of the hierarchy isn't going to affect
 * a group at the end of some other path if we're only configured at the leaf
 * level.
 *
 * Consider the following
 *
 *                        root blkg
 *                       /         \
 *        fast (target=5ms)         slow (target=10ms)
 *           /       \                  /         \
 *          a         b          normal(15ms)    unloved
 *
 * "a" and "b" have no target, but their combined io under "fast" cannot exceed
 * an average latency of 5ms.  If it does then we will throttle the "slow"
 * group.  In the case of "normal", if it exceeds its 15ms target, we will
 * throttle "unloved", but nobody else.
 *
 * In this example "fast", "slow", and "normal" will be the only groups actually
 * accounting their io latencies.  We have to walk up the hierarchy to the root
 * on every submit and complete so we can do the appropriate stat recording and
 * adjust the queue depth of ourselves if needed.
 *
 * There are 2 ways we throttle IO.
 *
 * 1) Queue depth throttling.  As we throttle down we will adjust the maximum
 * number of IO's we're allowed to have in flight.  This starts at UINT_MAX
 * and goes down to 1.  If the group is only ever submitting IO for itself
 * then this is the only way we throttle.
 *
 * 2) Induced delay throttling.  This is for the case that a group is
 * generating IO that has to be issued by the root cg to avoid priority
 * inversion, think REQ_META or REQ_SWAP.  If we are already at qd == 1 and
 * we're getting a lot of work done for us on behalf of the root cg and are
 * being asked to scale down more, then we induce a latency at userspace
 * return.  We accumulate the total amount of time we need to be punished by
 * doing
 *
 * total_time += min_lat_nsec - actual_io_completion
 *
 * and then at throttle time we do
 *
 * throttle_time = min(total_time, NSEC_PER_SEC)
 *
 * This induced delay will throttle back the activity that is generating the
 * root cg issued io's, whether that's some metadata intensive operation or the
 * group is using so much memory that it is pushing us into swap.
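 *
 * As an illustration (numbers hypothetical): with a 5ms target
 * (min_lat_nsec = 5000000) and root-issued IOs completing in 2ms each,
 * every completion adds 3ms to total_time, so after a few hundred such
 * completions the delay charged at userspace return saturates at the
 * NSEC_PER_SEC cap.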
 *
 * Copyright (C) 2018 Josef Bacik
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/memcontrol.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include <linux/blk-mq.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
#include "blk-cgroup.h"
#include "blk.h"

#define DEFAULT_SCALE_COOKIE 1000000U

static struct blkcg_policy blkcg_policy_iolatency;
struct iolatency_grp;

struct blk_iolatency {
	struct rq_qos rqos;
	struct timer_list timer;

	/*
	 * ->enabled is the master enable switch gating the throttling logic
	 * and inflight tracking.  The number of cgroups which have iolat
	 * enabled is tracked in ->enable_cnt, and ->enabled is flipped on/off
	 * accordingly from ->enable_work with the request_queue frozen.  For
	 * details, see blkiolatency_enable_work_fn().
	 */
	bool enabled;
	atomic_t enable_cnt;
	struct work_struct enable_work;
};

static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
{
	return container_of(rqos, struct blk_iolatency, rqos);
}

struct child_latency_info {
	spinlock_t lock;

	/* Last time we adjusted the scale of everybody. */
	u64 last_scale_event;

	/* The latency that we missed. */
	u64 scale_lat;

	/* Total io's from all of our children for the last summation. */
	u64 nr_samples;

	/* The guy who actually changed the latency numbers. */
	struct iolatency_grp *scale_grp;

	/* Cookie to tell if we need to scale up or down. */
	atomic_t scale_cookie;
};
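
/*
 * Note: scale_cookie starts at DEFAULT_SCALE_COOKIE and only ever moves
 * below it; a value under the default tells the children to scale down,
 * and climbing back up to the default unthrottles everything.  See
 * scale_cookie_change() and check_scale_change() below.
 */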

struct percentile_stats {
	u64 total;
	u64 missed;
};

struct latency_stat {
	union {
		struct percentile_stats ps;
		struct blk_rq_stat rqs;
	};
};

struct iolatency_grp {
	struct blkg_policy_data pd;
	struct latency_stat __percpu *stats;
	struct latency_stat cur_stat;
	struct blk_iolatency *blkiolat;
	unsigned int max_depth;
	struct rq_wait rq_wait;
	atomic64_t window_start;
	atomic_t scale_cookie;
	u64 min_lat_nsec;
	u64 cur_win_nsec;

	/* total running average of our io latency. */
	u64 lat_avg;

	/* Our current number of IO's for the last summation. */
	u64 nr_samples;

	bool ssd;
	struct child_latency_info child_lat;
};

#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
/*
 * These are the constants used to fake the fixed-point moving average
 * calculation just like load average.  The call to calc_load() folds
 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg.  The sampling
 * window size is bucketed to try to approximately calculate average
 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
 * elapse immediately.  Note, windows only elapse with IO activity.  Idle
 * periods extend the most recent window.
 */
#define BLKIOLATENCY_NR_EXP_FACTORS 5
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
				      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
	2045, // exp(1/600) - 600 samples
	2039, // exp(1/240) - 240 samples
	2031, // exp(1/120) - 120 samples
	2023, // exp(1/80) - 80 samples
	2014, // exp(1/60) - 60 samples
};
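
/*
 * Illustrative sketch (hypothetical numbers) of how the factors above are
 * applied.  calc_load() folds a new window mean into lat_avg roughly as
 *
 *   lat_avg = (lat_avg * factor + mean * (FIXED_1 - factor)) / FIXED_1
 *
 * A 100ms window falls in bucket 100ms / 250ms = 0, picking factor 2045.
 * With lat_avg = 10ms and a new window mean of 20ms:
 *
 *   (10000000 * 2045 + 20000000 * 3) / 2048 ~= 10014648ns
 *
 * so a single bad window nudges, rather than jerks, the running average.
 */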

static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}

static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
{
	return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
}

static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
	return pd_to_blkg(&iolat->pd);
}

static inline void latency_stat_init(struct iolatency_grp *iolat,
				     struct latency_stat *stat)
{
	if (iolat->ssd) {
		stat->ps.total = 0;
		stat->ps.missed = 0;
	} else
		blk_rq_stat_init(&stat->rqs);
}

static inline void latency_stat_sum(struct iolatency_grp *iolat,
				    struct latency_stat *sum,
				    struct latency_stat *stat)
{
	if (iolat->ssd) {
		sum->ps.total += stat->ps.total;
		sum->ps.missed += stat->ps.missed;
	} else
		blk_rq_stat_sum(&sum->rqs, &stat->rqs);
}

static inline void latency_stat_record_time(struct iolatency_grp *iolat,
					    u64 req_time)
{
	struct latency_stat *stat = get_cpu_ptr(iolat->stats);
	if (iolat->ssd) {
		if (req_time >= iolat->min_lat_nsec)
			stat->ps.missed++;
		stat->ps.total++;
	} else
		blk_rq_stat_add(&stat->rqs, req_time);
	put_cpu_ptr(stat);
}

static inline bool latency_sum_ok(struct iolatency_grp *iolat,
				  struct latency_stat *stat)
{
	if (iolat->ssd) {
		u64 thresh = div64_u64(stat->ps.total, 10);
		thresh = max(thresh, 1ULL);
		return stat->ps.missed < thresh;
	}
	return stat->rqs.mean <= iolat->min_lat_nsec;
}

static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
				       struct latency_stat *stat)
{
	if (iolat->ssd)
		return stat->ps.total;
	return stat->rqs.nr_samples;
}

static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
					      struct latency_stat *stat)
{
	int exp_idx;

	if (iolat->ssd)
		return;

	/*
	 * calc_load() takes in a number stored in fixed point representation.
	 * Because we are using this for IO time in ns, the values stored
	 * are significantly larger than the FIXED_1 denominator (2048).
	 * Therefore, rounding errors in the calculation are negligible and
	 * can be ignored.
	 */
	exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
			div64_u64(iolat->cur_win_nsec,
				  BLKIOLATENCY_EXP_BUCKET_SIZE));
	iolat->lat_avg = calc_load(iolat->lat_avg,
				   iolatency_exp_factors[exp_idx],
				   stat->rqs.mean);
}

static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
	atomic_dec(&rqw->inflight);
	wake_up(&rqw->wait);
}

static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
{
	struct iolatency_grp *iolat = private_data;
	return rq_wait_inc_below(rqw, iolat->max_depth);
}

static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
				       struct iolatency_grp *iolat,
				       bool issue_as_root,
				       bool use_memdelay)
{
	struct rq_wait *rqw = &iolat->rq_wait;
	unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);

	if (use_delay)
		blkcg_schedule_throttle(rqos->disk, use_memdelay);

	/*
	 * To avoid priority inversions we want to just take a slot if we are
	 * issuing as root.  If we're being killed off there's no point in
	 * delaying things, we may have been killed by OOM so throttling may
	 * make recovery take even longer, so just let the IO's through so the
	 * task can go away.
	 */
	if (issue_as_root || fatal_signal_pending(current)) {
		atomic_inc(&rqw->inflight);
		return;
	}

	rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
}

#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4

static inline unsigned long scale_amount(unsigned long qd, bool up)
{
	return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
}
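
/*
 * Hypothetical numbers: with nr_requests = 128, a scale-up step is
 * 128 >> SCALE_UP_FACTOR = 8 while a scale-down step is
 * 128 >> SCALE_DOWN_FACTOR = 32, so we back off under pressure roughly
 * four times faster than we recover.
 */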

/*
 * We scale the qd down faster than we scale up, so we need to use this helper
 * to adjust the scale_cookie accordingly so we don't prematurely get
 * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
 *
 * Each group has their own local copy of the last scale cookie they saw, so if
 * the global scale cookie goes up or down they know which way they need to go
 * based on their last knowledge of it.
 */
static void scale_cookie_change(struct blk_iolatency *blkiolat,
				struct child_latency_info *lat_info,
				bool up)
{
	unsigned long qd = blkiolat->rqos.disk->queue->nr_requests;
	unsigned long scale = scale_amount(qd, up);
	unsigned long old = atomic_read(&lat_info->scale_cookie);
	unsigned long max_scale = qd << 1;
	unsigned long diff = 0;

	if (old < DEFAULT_SCALE_COOKIE)
		diff = DEFAULT_SCALE_COOKIE - old;

	if (up) {
		if (scale + old > DEFAULT_SCALE_COOKIE)
			atomic_set(&lat_info->scale_cookie,
				   DEFAULT_SCALE_COOKIE);
		else if (diff > qd)
			atomic_inc(&lat_info->scale_cookie);
		else
			atomic_add(scale, &lat_info->scale_cookie);
	} else {
		/*
		 * We don't want to dig a hole so deep that it takes us hours to
		 * dig out of it.  Just enough that we don't throttle/unthrottle
		 * with jagged workloads but can still unthrottle once pressure
		 * has sufficiently dissipated.
		 */
		if (diff > qd) {
			if (diff < max_scale)
				atomic_dec(&lat_info->scale_cookie);
		} else {
			atomic_sub(scale, &lat_info->scale_cookie);
		}
	}
}

/*
 * Change the queue depth of the iolatency_grp.  We add 1/16th of the
 * queue depth at a time so we don't get wild swings and hopefully dial in to
 * fairer distribution of the overall queue depth.  We halve the queue depth
 * at a time so we can scale down queue depth quickly from default unlimited
 * to target.
 */
static void scale_change(struct iolatency_grp *iolat, bool up)
{
	unsigned long qd = iolat->blkiolat->rqos.disk->queue->nr_requests;
	unsigned long scale = scale_amount(qd, up);
	unsigned long old = iolat->max_depth;

	if (old > qd)
		old = qd;

	if (up) {
		if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
			return;

		if (old < qd) {
			old += scale;
			old = min(old, qd);
			iolat->max_depth = old;
			wake_up_all(&iolat->rq_wait.wait);
		}
	} else {
		old >>= 1;
		iolat->max_depth = max(old, 1UL);
	}
}
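
/*
 * Example of the asymmetry above (hypothetical numbers): with
 * nr_requests = 128, scaling down walks max_depth 128 -> 64 -> 32 -> ...
 * -> 1 by halving, while scaling up crawls back 8 requests at a time.
 */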

/* Check our parent and see if the scale cookie has changed. */
static void check_scale_change(struct iolatency_grp *iolat)
{
	struct iolatency_grp *parent;
	struct child_latency_info *lat_info;
	unsigned int cur_cookie;
	unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
	u64 scale_lat;
	int direction = 0;

	parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
	if (!parent)
		return;

	lat_info = &parent->child_lat;
	cur_cookie = atomic_read(&lat_info->scale_cookie);
	scale_lat = READ_ONCE(lat_info->scale_lat);

	if (cur_cookie < our_cookie)
		direction = -1;
	else if (cur_cookie > our_cookie)
		direction = 1;
	else
		return;

	if (!atomic_try_cmpxchg(&iolat->scale_cookie, &our_cookie, cur_cookie)) {
		/* Somebody beat us to the punch, just bail. */
		return;
	}

	if (direction < 0 && iolat->min_lat_nsec) {
		u64 samples_thresh;

		if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
			return;

		/*
		 * Sometimes high priority groups are their own worst enemy, so
		 * instead of taking it out on some poor other group that did 5%
		 * or less of the IO's for the last summation just skip this
		 * scale down event.
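		 *
		 * (Hypothetical numbers: if the parent saw 1000 samples in
		 * the last summation, a group that contributed 50 or fewer
		 * of them is spared the scale down.)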
		 */
		samples_thresh = lat_info->nr_samples * 5;
		samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
		if (iolat->nr_samples <= samples_thresh)
			return;
	}

	/* We're as low as we can go. */
	if (iolat->max_depth == 1 && direction < 0) {
		blkcg_use_delay(lat_to_blkg(iolat));
		return;
	}

	/* We're back to the default cookie, unthrottle all the things. */
	if (cur_cookie == DEFAULT_SCALE_COOKIE) {
		blkcg_clear_delay(lat_to_blkg(iolat));
		iolat->max_depth = UINT_MAX;
		wake_up_all(&iolat->rq_wait.wait);
		return;
	}

	scale_change(iolat, direction > 0);
}

static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
	struct blkcg_gq *blkg = bio->bi_blkg;
	bool issue_as_root = bio_issue_as_root_blkg(bio);

	if (!blkiolat->enabled)
		return;

	while (blkg && blkg->parent) {
		struct iolatency_grp *iolat = blkg_to_lat(blkg);
		if (!iolat) {
			blkg = blkg->parent;
			continue;
		}

		check_scale_change(iolat);
		__blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
					   (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
		blkg = blkg->parent;
	}
	if (!timer_pending(&blkiolat->timer))
		mod_timer(&blkiolat->timer, jiffies + HZ);
}

static void iolatency_record_time(struct iolatency_grp *iolat,
				  struct bio_issue *issue, u64 now,
				  bool issue_as_root)
{
	u64 start = bio_issue_time(issue);
	u64 req_time;

	/*
	 * Truncate 'now' the same way the issue time was truncated so that
	 * the two timestamps are comparable.
	 */
	now = __bio_issue_time(now);

	if (now <= start)
		return;

	req_time = now - start;

	/*
	 * We don't want to count issue_as_root bio's in the cgroup's latency
	 * statistics as it could skew the numbers downwards.
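	 * Instead, a root-issued bio that completed faster than the group's
	 * target feeds the induced-delay accumulation described at the top
	 * of this file (total_time += min_lat_nsec - actual_io_completion).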
	 */
	if (unlikely(issue_as_root && iolat->max_depth != UINT_MAX)) {
		u64 sub = iolat->min_lat_nsec;
		if (req_time < sub)
			blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
		return;
	}

	latency_stat_record_time(iolat, req_time);
}

#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5

static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct iolatency_grp *parent;
	struct child_latency_info *lat_info;
	struct latency_stat stat;
	unsigned long flags;
	int cpu;

	latency_stat_init(iolat, &stat);
	preempt_disable();
	for_each_online_cpu(cpu) {
		struct latency_stat *s;
		s = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_sum(iolat, &stat, s);
		latency_stat_init(iolat, s);
	}
	preempt_enable();

	parent = blkg_to_lat(blkg->parent);
	if (!parent)
		return;

	lat_info = &parent->child_lat;

	iolat_update_total_lat_avg(iolat, &stat);

	/* Everything is ok and we don't need to adjust the scale. */
	if (latency_sum_ok(iolat, &stat) &&
	    atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
		return;

	/* Somebody beat us to the punch, just bail. */
	spin_lock_irqsave(&lat_info->lock, flags);

	latency_stat_sum(iolat, &iolat->cur_stat, &stat);
	lat_info->nr_samples -= iolat->nr_samples;
	lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
	iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);

	if ((lat_info->last_scale_event >= now ||
	     now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
		goto out;

	if (latency_sum_ok(iolat, &iolat->cur_stat) &&
	    latency_sum_ok(iolat, &stat)) {
		if (latency_stat_samples(iolat, &iolat->cur_stat) <
		    BLKIOLATENCY_MIN_GOOD_SAMPLES)
			goto out;
		if (lat_info->scale_grp == iolat) {
			lat_info->last_scale_event = now;
			scale_cookie_change(iolat->blkiolat, lat_info, true);
		}
	} else if (lat_info->scale_lat == 0 ||
		   lat_info->scale_lat >= iolat->min_lat_nsec) {
		lat_info->last_scale_event = now;
		if (!lat_info->scale_grp ||
		    lat_info->scale_lat > iolat->min_lat_nsec) {
			WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
			lat_info->scale_grp = iolat;
		}
		scale_cookie_change(iolat->blkiolat, lat_info, false);
	}
	latency_stat_init(iolat, &iolat->cur_stat);
out:
	spin_unlock_irqrestore(&lat_info->lock, flags);
}

static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	struct blkcg_gq *blkg;
	struct rq_wait *rqw;
	struct iolatency_grp *iolat;
	u64 window_start;
	u64 now;
	bool issue_as_root = bio_issue_as_root_blkg(bio);
	int inflight = 0;

	blkg = bio->bi_blkg;
	if (!blkg || !bio_flagged(bio, BIO_QOS_THROTTLED))
		return;

	iolat = blkg_to_lat(bio->bi_blkg);
	if (!iolat)
		return;

	if (!iolat->blkiolat->enabled)
		return;

	now = ktime_to_ns(ktime_get());
	while (blkg && blkg->parent) {
		iolat = blkg_to_lat(blkg);
		if (!iolat) {
			blkg = blkg->parent;
			continue;
		}
		rqw = &iolat->rq_wait;

		inflight = atomic_dec_return(&rqw->inflight);
		WARN_ON_ONCE(inflight < 0);
		/*
		 * If bi_status is BLK_STS_AGAIN, the bio wasn't actually
		 * submitted, so do not account for it.
		 */
		if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
			iolatency_record_time(iolat, &bio->bi_issue, now,
					      issue_as_root);
			window_start = atomic64_read(&iolat->window_start);
			if (now > window_start &&
			    (now - window_start) >= iolat->cur_win_nsec) {
				if (atomic64_try_cmpxchg(&iolat->window_start,
							 &window_start, now))
					iolatency_check_latencies(iolat, now);
			}
		}
		wake_up(&rqw->wait);
		blkg = blkg->parent;
	}
}

static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);

	timer_shutdown_sync(&blkiolat->timer);
	flush_work(&blkiolat->enable_work);
	blkcg_deactivate_policy(rqos->disk, &blkcg_policy_iolatency);
	kfree(blkiolat);
}

static const struct rq_qos_ops blkcg_iolatency_ops = {
	.throttle = blkcg_iolatency_throttle,
	.done_bio = blkcg_iolatency_done_bio,
	.exit = blkcg_iolatency_exit,
};

static void blkiolatency_timer_fn(struct timer_list *t)
{
	struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	u64 now = ktime_to_ns(ktime_get());

	rcu_read_lock();
	blkg_for_each_descendant_pre(blkg, pos_css,
				     blkiolat->rqos.disk->queue->root_blkg) {
		struct iolatency_grp *iolat;
		struct child_latency_info *lat_info;
		unsigned long flags;
		u64 cookie;

		/*
		 * We could be exiting, don't access the pd unless we have a
		 * ref on the blkg.
		 */
		if (!blkg_tryget(blkg))
			continue;

		iolat = blkg_to_lat(blkg);
		if (!iolat)
			goto next;

		lat_info = &iolat->child_lat;
		cookie = atomic_read(&lat_info->scale_cookie);

		if (cookie >= DEFAULT_SCALE_COOKIE)
			goto next;

		spin_lock_irqsave(&lat_info->lock, flags);
		if (lat_info->last_scale_event >= now)
			goto next_lock;

		/*
		 * We scaled down but don't have a scale_grp, scale up and
		 * carry on.
		 */
		if (lat_info->scale_grp == NULL) {
			scale_cookie_change(iolat->blkiolat, lat_info, true);
			goto next_lock;
		}

		/*
		 * It's been 5 seconds since our last scale event, clear the
		 * scale grp in case the group that needed the scale down isn't
		 * doing any IO currently.
		 */
		if (now - lat_info->last_scale_event >=
		    ((u64)NSEC_PER_SEC * 5))
			lat_info->scale_grp = NULL;
next_lock:
		spin_unlock_irqrestore(&lat_info->lock, flags);
next:
		blkg_put(blkg);
	}
	rcu_read_unlock();
}

/**
 * blkiolatency_enable_work_fn - Enable or disable iolatency on the device
 * @work: enable_work of the blk_iolatency of interest
 *
 * iolatency needs to keep track of the number of in-flight IOs per cgroup. This
 * is relatively expensive as it involves walking up the hierarchy twice for
 * every IO. Thus, if iolatency is not enabled in any cgroup for the device, we
 * want to disable the in-flight tracking.
 *
 * We have to make sure that the counting is balanced - we don't want to leak
 * the in-flight counts by disabling accounting in the completion path while IOs
 * are in flight. This is achieved by ensuring that no IO is in flight by
 * freezing the queue while flipping ->enabled. As this requires a sleepable
 * context, ->enabled flipping is punted to this work function.
 */
static void blkiolatency_enable_work_fn(struct work_struct *work)
{
	struct blk_iolatency *blkiolat = container_of(work, struct blk_iolatency,
						      enable_work);
	bool enabled;

	/*
	 * There can only be one instance of this function running for @blkiolat
	 * and it's guaranteed to be executed at least once after the latest
	 * ->enable_cnt modification.  Acting on the latest ->enable_cnt is
	 * sufficient.
	 *
	 * Also, we know @blkiolat is safe to access as ->enable_work is flushed
	 * in blkcg_iolatency_exit().
	 */
	enabled = atomic_read(&blkiolat->enable_cnt);
	if (enabled != blkiolat->enabled) {
		blk_mq_freeze_queue(blkiolat->rqos.disk->queue);
		blkiolat->enabled = enabled;
		blk_mq_unfreeze_queue(blkiolat->rqos.disk->queue);
	}
}

static int blk_iolatency_init(struct gendisk *disk)
{
	struct blk_iolatency *blkiolat;
	int ret;

	blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
	if (!blkiolat)
		return -ENOMEM;

	ret = rq_qos_add(&blkiolat->rqos, disk, RQ_QOS_LATENCY,
			 &blkcg_iolatency_ops);
	if (ret)
		goto err_free;
	ret = blkcg_activate_policy(disk, &blkcg_policy_iolatency);
	if (ret)
		goto err_qos_del;

	timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
	INIT_WORK(&blkiolat->enable_work, blkiolatency_enable_work_fn);

	return 0;

err_qos_del:
	rq_qos_del(&blkiolat->rqos);
err_free:
	kfree(blkiolat);
	return ret;
}

static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
	struct iolatency_grp *iolat = blkg_to_lat(blkg);
	struct blk_iolatency *blkiolat = iolat->blkiolat;
	u64 oldval = iolat->min_lat_nsec;

	iolat->min_lat_nsec = val;
	iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
	iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
				    BLKIOLATENCY_MAX_WIN_SIZE);

	if (!oldval && val) {
		if (atomic_inc_return(&blkiolat->enable_cnt) == 1)
			schedule_work(&blkiolat->enable_work);
	}
	if (oldval && !val) {
		blkcg_clear_delay(blkg);
		if (atomic_dec_return(&blkiolat->enable_cnt) == 0)
			schedule_work(&blkiolat->enable_work);
	}
}

static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
	if (blkg->parent) {
		struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
		struct child_latency_info *lat_info;
		if (!iolat)
			return;

		lat_info = &iolat->child_lat;
		spin_lock(&lat_info->lock);
		atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
		lat_info->last_scale_event = 0;
		lat_info->scale_grp = NULL;
		lat_info->scale_lat = 0;
		spin_unlock(&lat_info->lock);
	}
}

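/*
 * Userspace writes "MAJ:MIN target=<usec>" (or "target=max") to the
 * cgroup's io.latency file, e.g. (hypothetical device numbers):
 *
 *   echo "8:16 target=750" > io.latency
 *
 * blkg_conf_open_bdev() below resolves the MAJ:MIN part, so the parser
 * here only sees the key=value body.
 */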
static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkcg_gq *blkg;
	struct blkg_conf_ctx ctx;
	struct iolatency_grp *iolat;
	char *p, *tok;
	u64 lat_val = 0;
	u64 oldval;
	int ret;

	blkg_conf_init(&ctx, buf);

	ret = blkg_conf_open_bdev(&ctx);
	if (ret)
		goto out;

	/*
	 * blk_iolatency_init() may fail after rq_qos_add() succeeds which can
	 * confuse iolat_rq_qos() test. Make the test and init atomic.
	 */
	lockdep_assert_held(&ctx.bdev->bd_queue->rq_qos_mutex);
	if (!iolat_rq_qos(ctx.bdev->bd_queue))
		ret = blk_iolatency_init(ctx.bdev->bd_disk);
	if (ret)
		goto out;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, &ctx);
	if (ret)
		goto out;

	iolat = blkg_to_lat(ctx.blkg);
	p = ctx.body;

	ret = -EINVAL;
	while ((tok = strsep(&p, " "))) {
		char key[16];
		char val[21];	/* 18446744073709551616 */

		if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
			goto out;

		if (!strcmp(key, "target")) {
			u64 v;

			if (!strcmp(val, "max"))
				lat_val = 0;
			else if (sscanf(val, "%llu", &v) == 1)
				lat_val = v * NSEC_PER_USEC;
			else
				goto out;
		} else {
			goto out;
		}
	}

	/* Walk up the tree to see if our new val is lower than it should be. */
	blkg = ctx.blkg;
	oldval = iolat->min_lat_nsec;

	iolatency_set_min_lat_nsec(blkg, lat_val);
	if (oldval != iolat->min_lat_nsec)
		iolatency_clear_scaling(blkg);
	ret = 0;
out:
	blkg_conf_exit(&ctx);
	return ret ?: nbytes;
}

static u64 iolatency_prfill_limit(struct seq_file *sf,
				  struct blkg_policy_data *pd, int off)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname || !iolat->min_lat_nsec)
		return 0;
	seq_printf(sf, "%s target=%llu\n",
		   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
	return 0;
}

static int iolatency_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  iolatency_prfill_limit,
			  &blkcg_policy_iolatency, seq_cft(sf)->private, false);
	return 0;
}

static void iolatency_ssd_stat(struct iolatency_grp *iolat, struct seq_file *s)
{
	struct latency_stat stat;
	int cpu;

	latency_stat_init(iolat, &stat);
	preempt_disable();
	for_each_online_cpu(cpu) {
		struct latency_stat *s;
		s = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_sum(iolat, &stat, s);
	}
	preempt_enable();

	if (iolat->max_depth == UINT_MAX)
		seq_printf(s, " missed=%llu total=%llu depth=max",
			   (unsigned long long)stat.ps.missed,
			   (unsigned long long)stat.ps.total);
	else
		seq_printf(s, " missed=%llu total=%llu depth=%u",
			   (unsigned long long)stat.ps.missed,
			   (unsigned long long)stat.ps.total,
			   iolat->max_depth);
}

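/*
 * Appends this policy's debug info to the cgroup's io.stat line when the
 * blkcg_debug_stats module parameter is set, e.g. (illustrative values)
 * " depth=max avg_lat=42 win=100".
 */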
static void iolatency_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	unsigned long long avg_lat;
	unsigned long long cur_win;

	if (!blkcg_debug_stats)
		return;

	if (iolat->ssd)
		return iolatency_ssd_stat(iolat, s);

	avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
	cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
	if (iolat->max_depth == UINT_MAX)
		seq_printf(s, " depth=max avg_lat=%llu win=%llu",
			   avg_lat, cur_win);
	else
		seq_printf(s, " depth=%u avg_lat=%llu win=%llu",
			   iolat->max_depth, avg_lat, cur_win);
}

static struct blkg_policy_data *iolatency_pd_alloc(struct gendisk *disk,
						   struct blkcg *blkcg, gfp_t gfp)
{
	struct iolatency_grp *iolat;

	iolat = kzalloc_node(sizeof(*iolat), gfp, disk->node_id);
	if (!iolat)
		return NULL;
	iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
					  __alignof__(struct latency_stat), gfp);
	if (!iolat->stats) {
		kfree(iolat);
		return NULL;
	}
	return &iolat->pd;
}

static void iolatency_pd_init(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct rq_qos *rqos = iolat_rq_qos(blkg->q);
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
	u64 now = ktime_to_ns(ktime_get());
	int cpu;

	if (blk_queue_nonrot(blkg->q))
		iolat->ssd = true;
	else
		iolat->ssd = false;

	for_each_possible_cpu(cpu) {
		struct latency_stat *stat;
		stat = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_init(iolat, stat);
	}

	latency_stat_init(iolat, &iolat->cur_stat);
	rq_wait_init(&iolat->rq_wait);
	spin_lock_init(&iolat->child_lat.lock);
	iolat->max_depth = UINT_MAX;
	iolat->blkiolat = blkiolat;
	iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
	atomic64_set(&iolat->window_start, now);

	/*
	 * We init things in list order, so the pd for the parent may not be
	 * init'ed yet for whatever reason.
	 */
	if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
		struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
		atomic_set(&iolat->scale_cookie,
			   atomic_read(&parent->child_lat.scale_cookie));
	} else {
		atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
	}

	atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
}

static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	struct blkcg_gq *blkg = lat_to_blkg(iolat);

	iolatency_set_min_lat_nsec(blkg, 0);
	iolatency_clear_scaling(blkg);
}

static void iolatency_pd_free(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	free_percpu(iolat->stats);
	kfree(iolat);
}

static struct cftype iolatency_files[] = {
	{
		.name = "latency",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = iolatency_print_limit,
		.write = iolatency_set_limit,
	},
	{}
};

static struct blkcg_policy blkcg_policy_iolatency = {
	.dfl_cftypes = iolatency_files,
	.pd_alloc_fn = iolatency_pd_alloc,
	.pd_init_fn = iolatency_pd_init,
	.pd_offline_fn = iolatency_pd_offline,
	.pd_free_fn = iolatency_pd_free,
	.pd_stat_fn = iolatency_pd_stat,
};

static int __init iolatency_init(void)
{
	return blkcg_policy_register(&blkcg_policy_iolatency);
}

static void __exit iolatency_exit(void)
{
	blkcg_policy_unregister(&blkcg_policy_iolatency);
}

module_init(iolatency_init);
module_exit(iolatency_exit);